From c5a1c92cc4da92cbacae1d2177bfd7446652f772 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Wed, 5 Jun 2024 20:41:12 -0300 Subject: [PATCH 01/52] feat(frontend): init cwmp and usp devices segregation --- frontend/src/pages/devices/cwmp/[...id].js | 81 +++++ .../src/pages/devices/{ => usp}/[...id].js | 9 +- .../src/sections/devices/cwmp/devices-wifi.js | 326 ++++++++++++++++++ .../src/sections/devices/usp/devices-wifi.js | 132 +++++++ .../overview/overview-latest-orders.js | 30 +- 5 files changed, 565 insertions(+), 13 deletions(-) create mode 100644 frontend/src/pages/devices/cwmp/[...id].js rename frontend/src/pages/devices/{ => usp}/[...id].js (80%) create mode 100644 frontend/src/sections/devices/cwmp/devices-wifi.js create mode 100644 frontend/src/sections/devices/usp/devices-wifi.js diff --git a/frontend/src/pages/devices/cwmp/[...id].js b/frontend/src/pages/devices/cwmp/[...id].js new file mode 100644 index 0000000..92959ea --- /dev/null +++ b/frontend/src/pages/devices/cwmp/[...id].js @@ -0,0 +1,81 @@ +import Head from 'next/head'; +import { Box, Stack, Typography, Container, Unstable_Grid2 as Grid, +Tab, +Tabs, +SvgIcon } from '@mui/material'; +import { Layout as DashboardLayout } from 'src/layouts/dashboard/layout'; +import { useRouter } from 'next/router'; +import { DevicesRPC } from 'src/sections/devices/devices-rpc'; +import { DevicesDiscovery } from 'src/sections/devices/devices-discovery'; +import EnvelopeIcon from '@heroicons/react/24/outline/EnvelopeIcon'; +import MagnifyingGlassIcon from '@heroicons/react/24/solid/MagnifyingGlassIcon'; +import WifiIcon from '@heroicons/react/24/solid/WifiIcon'; +import { useEffect, useState } from 'react'; +import { DevicesWiFi } from 'src/sections/devices/cwmp/devices-wifi'; + +const Page = () => { + const router = useRouter() + + const deviceID = router.query.id[0] + const section = router.query.id[1] + + const sectionHandler = () => { + switch(section){ + case "msg": + return + case "wifi": + return + 
default: + return

Hello World

+ } + } + + useEffect(()=>{ + console.log("deviceid:",deviceID) + }) + + return( + <> + + + Oktopus | TR-369 + + + + + + + + } iconPosition={"end"} label="Wi-Fi" onClick={()=>{router.push(`/devices/cwmp/${deviceID}/wifi`)}} value={"wifi"}/> + {/* {router.push(`/devices/cwmp/${deviceID}/discovery`)}} icon={} iconPosition={"end"} label="Discover Parameters" /> */} + {router.push(`/devices/cwmp/${deviceID}/msg`)}} icon={} iconPosition={"end"} label="Remote Messages" /> + + + + { + sectionHandler() + } + + + + + ); +}; + +Page.getLayout = (page) => ( + + {page} + +); + +export default Page; \ No newline at end of file diff --git a/frontend/src/pages/devices/[...id].js b/frontend/src/pages/devices/usp/[...id].js similarity index 80% rename from frontend/src/pages/devices/[...id].js rename to frontend/src/pages/devices/usp/[...id].js index 9708a09..715ab96 100644 --- a/frontend/src/pages/devices/[...id].js +++ b/frontend/src/pages/devices/usp/[...id].js @@ -11,6 +11,7 @@ import EnvelopeIcon from '@heroicons/react/24/outline/EnvelopeIcon'; import MagnifyingGlassIcon from '@heroicons/react/24/solid/MagnifyingGlassIcon'; import WifiIcon from '@heroicons/react/24/solid/WifiIcon'; import { useEffect, useState } from 'react'; +import { DevicesWiFi } from 'src/sections/devices/usp/devices-wifi'; const Page = () => { const router = useRouter() @@ -24,6 +25,8 @@ const Page = () => { return case "discovery": return + case "wifi": + return default: return

Hello World

} @@ -55,9 +58,9 @@ const Page = () => { }} mb={3}> - } iconPosition={"end"} label="Wi-Fi" /> - {router.push(`/devices/${deviceID}/discovery`)}} icon={} iconPosition={"end"} label="Discover Parameters" /> - {router.push(`/devices/${deviceID}/msg`)}} icon={} iconPosition={"end"} label="Remote Messages" /> + } iconPosition={"end"} label="Wi-Fi" onClick={()=>{router.push(`/devices/usp/${deviceID}/wifi`)}} value={"wifi"}/> + {router.push(`/devices/usp/${deviceID}/discovery`)}} icon={} iconPosition={"end"} label="Discover Parameters" /> + {router.push(`/devices/usp/${deviceID}/msg`)}} icon={} iconPosition={"end"} label="Remote Messages" /> diff --git a/frontend/src/sections/devices/cwmp/devices-wifi.js b/frontend/src/sections/devices/cwmp/devices-wifi.js new file mode 100644 index 0000000..c5f4389 --- /dev/null +++ b/frontend/src/sections/devices/cwmp/devices-wifi.js @@ -0,0 +1,326 @@ +import { useCallback, useEffect, useState } from 'react'; +import { + Button, + Card, + CardActions, + CardContent, + CardHeader, + Divider, + Stack, + TextField, + InputLabel, + MenuItem, + Select, + FormControl, + SvgIcon, + Dialog, + DialogTitle, + DialogContent, + DialogContentText, + DialogActions, + Box, + IconButton, + Icon, + Checkbox, + FormControlLabel +} from '@mui/material'; +import XMarkIcon from '@heroicons/react/24/outline/XMarkIcon'; +import PaperAirplane from '@heroicons/react/24/solid/PaperAirplaneIcon'; +import Check from '@heroicons/react/24/outline/CheckIcon' +import CircularProgress from '@mui/material/CircularProgress'; +import Backdrop from '@mui/material/Backdrop'; +import { useRouter } from 'next/router'; +import GlobeAltIcon from '@heroicons/react/24/outline/GlobeAltIcon'; + + +export const DevicesWiFi = () => { + + const router = useRouter() + + const [content, setContent] = useState( + [ + { + "path": "InternetGatewayDevice.LANDevice.1.WLANConfiguration.1.", + "name": { + "writable": false, + "value": "wl1" + }, + "ssid": { + "writable": true, + "value": 
"HUAWEI-TEST-1" + }, + "password": { + "writable": false, + "value": "" + }, + "security": { + "writable": false, + "value": "b/g/n" + }, + "enable": { + "writable": true, + "value": "0" + }, + "status": { + "writable": false, + "value": "Disabled" + } + }, + { + "path": "InternetGatewayDevice.LANDevice.1.WLANConfiguration.2.", + "name": { + "writable": false, + "value": "wl0" + }, + "ssid": { + "writable": true, + "value": "HUAWEI-TEST-1" + }, + "password": { + "writable": false, + "value": "" + }, + "security": { + "writable": false, + "value": "a/n/ac/ax" + }, + "enable": { + "writable": true, + "value": "1" + }, + "status": { + "writable": false, + "value": "Up" + } + }, + { + "path": "InternetGatewayDevice.LANDevice.2.WLANConfiguration.1.", + "name": { + "writable": false, + "value": "wl1.1" + }, + "ssid": { + "writable": true, + "value": "HUAWEI-1BLSP6_Guest" + }, + "password": { + "writable": false, + "value": "" + }, + "security": { + "writable": false, + "value": "b/g/n" + }, + "enable": { + "writable": true, + "value": "0" + }, + "status": { + "writable": false, + "value": "Disabled" + } + }, + { + "path": "InternetGatewayDevice.LANDevice.2.WLANConfiguration.2.", + "name": { + "writable": false, + "value": "wl0.1" + }, + "ssid": { + "writable": true, + "value": "Mobile WiFi" + }, + "password": { + "writable": false, + "value": "" + }, + "security": { + "writable": false, + "value": "a/n/ac/ax" + }, + "enable": { + "writable": true, + "value": "0" + }, + "status": { + "writable": false, + "value": "Disabled" + } + } + ]) + + const fetchWifiData = async () => { + + var myHeaders = new Headers(); + myHeaders.append("Content-Type", "application/json"); + myHeaders.append("Authorization", localStorage.getItem("token")); + + var requestOptions = { + method: 'GET', + headers: myHeaders, + redirect: 'follow' + }; + + fetch(`${process.env.NEXT_PUBLIC_REST_ENDPOINT}/device/${router.query.id[0]}/wifi`, requestOptions) + .then(response => response.text()) + 
.then(result => { + if (result.status === 401){ + router.push("/auth/login") + } + if (result.status === 404){ + //TODO: set device as offline + return + } + stepContentClasses(result) + }) + .catch(error => console.log('error', error)); + }; + + useEffect(()=>{ + // fetchWifiData() + },[]) + + return ( content && + + { + content.map((item, index) => { + return ( + + + + + } + /> + + + } label="Enabled" /> + + + + + + + + + + ) + }) + } + {/* + + + + } + /> + + + } label="Enabled" /> + + + + + + + + + + + } + /> + + + } label="Enabled" /> + + + Security + + + + + + + + + */} + + + ); +}; diff --git a/frontend/src/sections/devices/usp/devices-wifi.js b/frontend/src/sections/devices/usp/devices-wifi.js new file mode 100644 index 0000000..18d8d3c --- /dev/null +++ b/frontend/src/sections/devices/usp/devices-wifi.js @@ -0,0 +1,132 @@ +import { useCallback, useEffect, useState } from 'react'; +import { + Button, + Card, + CardActions, + CardContent, + CardHeader, + Divider, + Stack, + TextField, + InputLabel, + MenuItem, + Select, + FormControl, + SvgIcon, + Dialog, + DialogTitle, + DialogContent, + DialogContentText, + DialogActions, + Box, + IconButton, + Icon, + Checkbox, + FormControlLabel +} from '@mui/material'; +import XMarkIcon from '@heroicons/react/24/outline/XMarkIcon'; +import PaperAirplane from '@heroicons/react/24/solid/PaperAirplaneIcon'; +import Check from '@heroicons/react/24/outline/CheckIcon' +import CircularProgress from '@mui/material/CircularProgress'; +import Backdrop from '@mui/material/Backdrop'; +import { useRouter } from 'next/router'; +import GlobeAltIcon from '@heroicons/react/24/outline/GlobeAltIcon'; + + +export const DevicesWiFi = () => { + + return ( + + + + + + } + /> + + + } label="Enabled" /> + + + + + + + + + + + } + /> + + + } label="Enabled" /> + + + Security + + + + + + + + + + + + ); +}; diff --git a/frontend/src/sections/overview/overview-latest-orders.js b/frontend/src/sections/overview/overview-latest-orders.js index 
4df6fb9..222cbda 100644 --- a/frontend/src/sections/overview/overview-latest-orders.js +++ b/frontend/src/sections/overview/overview-latest-orders.js @@ -38,6 +38,14 @@ const status = (s)=>{ } } +const getDeviceProtocol = (order) => { + if (order.Mqtt == 0 && order.Websockets == 0 && order.Stomp == 0) { + return "cwmp" + }else { + return "usp" + } +} + export const OverviewLatestOrders = (props) => { const { orders = [], sx } = props; @@ -97,17 +105,19 @@ export const OverviewLatestOrders = (props) => { - { order.Mqtt == 0 && order.Websockets == 0 && order.Stomp == 0 ? : } + + + + } ); From 19bcf9c01a70515d667b9e1dc4c6629415350f1a Mon Sep 17 00:00:00 2001 From: leandrofars Date: Sat, 8 Jun 2024 11:41:29 -0300 Subject: [PATCH 02/52] chore(frontend): remove github image in the footer --- frontend/src/layouts/dashboard/layout.js | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/frontend/src/layouts/dashboard/layout.js b/frontend/src/layouts/dashboard/layout.js index 22f42fa..c3d0db7 100644 --- a/frontend/src/layouts/dashboard/layout.js +++ b/frontend/src/layouts/dashboard/layout.js @@ -59,24 +59,6 @@ export const Layout = withAuthGuard((props) => { {children} -
- - github logo - - -
); From ed0deed5a3b0843862450fbc8e01a2c3fb3d09f8 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Sat, 8 Jun 2024 11:42:36 -0300 Subject: [PATCH 03/52] feat(cwmp): logs formatting and switch --- backend/services/acs/internal/bridge/bridge.go | 11 +++++------ backend/services/acs/internal/server/handler/cwmp.go | 4 ++-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/backend/services/acs/internal/bridge/bridge.go b/backend/services/acs/internal/bridge/bridge.go index aa67db8..9c7ed27 100644 --- a/backend/services/acs/internal/bridge/bridge.go +++ b/backend/services/acs/internal/bridge/bridge.go @@ -44,9 +44,11 @@ func NewBridge( func (b *Bridge) StartBridge() { b.sub(handler.NATS_CWMP_ADAPTER_SUBJECT_PREFIX+"*.api", func(msg *nats.Msg) { - //log.Printf("Received message: %s", string(msg.Data)) - log.Printf("Subject: %s", msg.Subject) - log.Printf("Reply: %s", msg.Reply) + if b.conf.DebugMode { + log.Printf("Received message: %s", string(msg.Data)) + log.Printf("Subject: %s", msg.Subject) + log.Printf("Reply: %s", msg.Reply) + } device := getDeviceFromSubject(msg.Subject) cpe, ok := b.cpes[device] @@ -81,9 +83,6 @@ func (b *Bridge) StartBridge() { return } - //req := cpe.Queue.Dequeue().(handler.Request) - //cpe.Waiting = &req - defer cpe.Queue.Dequeue() select { diff --git a/backend/services/acs/internal/server/handler/cwmp.go b/backend/services/acs/internal/server/handler/cwmp.go index 0d23c6a..4deb85c 100644 --- a/backend/services/acs/internal/server/handler/cwmp.go +++ b/backend/services/acs/internal/server/handler/cwmp.go @@ -104,7 +104,7 @@ func (h *Handler) CwmpHandler(w http.ResponseWriter, r *http.Request) { if cpe.Waiting != nil { - log.Println("CPE was waiting for a response, now received something") + log.Println("ACS was waiting for a response from the CPE, now received something") var e cwmp.SoapEnvelope xml.Unmarshal([]byte(body), &e) @@ -122,7 +122,7 @@ func (h *Handler) CwmpHandler(w http.ResponseWriter, r *http.Request) { 
log.Println(body) } else { log.Println("Unknown message type") - log.Println("Envelope:", e) + log.Println("Body:", body) msgAnswer(cpe.Waiting.Callback, cpe.Waiting.Time, h.acsConfig.DeviceAnswerTimeout, tmp) } cpe.Waiting = nil From fa047e5e513bd313fd4477a3b3f85c16c4a8cf56 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Sun, 9 Jun 2024 14:30:17 -0300 Subject: [PATCH 04/52] feat(frontend): side-nav show what tab the user is in --- frontend/src/layouts/dashboard/side-nav.js | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/frontend/src/layouts/dashboard/side-nav.js b/frontend/src/layouts/dashboard/side-nav.js index 865fa20..d67d4d5 100644 --- a/frontend/src/layouts/dashboard/side-nav.js +++ b/frontend/src/layouts/dashboard/side-nav.js @@ -24,6 +24,19 @@ export const SideNav = (props) => { const pathname = usePathname(); const lgUp = useMediaQuery((theme) => theme.breakpoints.up('lg')); + const isItemActive = (currentPath, itemPath) => { + if (currentPath === itemPath) { + return true; + } + + if (currentPath.includes(itemPath) && itemPath !== '/') { + return true; + } + + return false; + //TODO: test frontend with color of the landing page + } + const content = ( { }} > {items.map((item) => { - const active = item.path ? 
(pathname === item.path) : false; + const active = isItemActive(pathname, item.path); return ( Date: Sun, 9 Jun 2024 19:15:29 -0300 Subject: [PATCH 05/52] feat(frontend): wifi screen for cwmp devices --- .../src/sections/devices/cwmp/devices-wifi.js | 326 ++++++++++-------- 1 file changed, 180 insertions(+), 146 deletions(-) diff --git a/frontend/src/sections/devices/cwmp/devices-wifi.js b/frontend/src/sections/devices/cwmp/devices-wifi.js index c5f4389..b08202c 100644 --- a/frontend/src/sections/devices/cwmp/devices-wifi.js +++ b/frontend/src/sections/devices/cwmp/devices-wifi.js @@ -21,133 +21,31 @@ import { Box, IconButton, Icon, + SnackbarContent, + Snackbar, Checkbox, - FormControlLabel + FormControlLabel, + useTheme, } from '@mui/material'; import XMarkIcon from '@heroicons/react/24/outline/XMarkIcon'; -import PaperAirplane from '@heroicons/react/24/solid/PaperAirplaneIcon'; -import Check from '@heroicons/react/24/outline/CheckIcon' +import Check from '@heroicons/react/24/outline/CheckIcon'; +//import ExclamationTriangleIcon from '@heroicons/react/24/solid/ExclamationTriangleIcon'; import CircularProgress from '@mui/material/CircularProgress'; import Backdrop from '@mui/material/Backdrop'; import { useRouter } from 'next/router'; import GlobeAltIcon from '@heroicons/react/24/outline/GlobeAltIcon'; - export const DevicesWiFi = () => { + const theme = useTheme(); const router = useRouter() - const [content, setContent] = useState( - [ - { - "path": "InternetGatewayDevice.LANDevice.1.WLANConfiguration.1.", - "name": { - "writable": false, - "value": "wl1" - }, - "ssid": { - "writable": true, - "value": "HUAWEI-TEST-1" - }, - "password": { - "writable": false, - "value": "" - }, - "security": { - "writable": false, - "value": "b/g/n" - }, - "enable": { - "writable": true, - "value": "0" - }, - "status": { - "writable": false, - "value": "Disabled" - } - }, - { - "path": "InternetGatewayDevice.LANDevice.1.WLANConfiguration.2.", - "name": { - "writable": false, - 
"value": "wl0" - }, - "ssid": { - "writable": true, - "value": "HUAWEI-TEST-1" - }, - "password": { - "writable": false, - "value": "" - }, - "security": { - "writable": false, - "value": "a/n/ac/ax" - }, - "enable": { - "writable": true, - "value": "1" - }, - "status": { - "writable": false, - "value": "Up" - } - }, - { - "path": "InternetGatewayDevice.LANDevice.2.WLANConfiguration.1.", - "name": { - "writable": false, - "value": "wl1.1" - }, - "ssid": { - "writable": true, - "value": "HUAWEI-1BLSP6_Guest" - }, - "password": { - "writable": false, - "value": "" - }, - "security": { - "writable": false, - "value": "b/g/n" - }, - "enable": { - "writable": true, - "value": "0" - }, - "status": { - "writable": false, - "value": "Disabled" - } - }, - { - "path": "InternetGatewayDevice.LANDevice.2.WLANConfiguration.2.", - "name": { - "writable": false, - "value": "wl0.1" - }, - "ssid": { - "writable": true, - "value": "Mobile WiFi" - }, - "password": { - "writable": false, - "value": "" - }, - "security": { - "writable": false, - "value": "a/n/ac/ax" - }, - "enable": { - "writable": true, - "value": "0" - }, - "status": { - "writable": false, - "value": "Disabled" - } - } - ]) + const [content, setContent] = useState([]) + const [applyContent, setApplyContent] = useState([]) + const [apply, setApply] = useState(false) + + const [errorModal, setErrorModal] = useState(false) + const [errorModalText, setErrorModalText] = useState("") const fetchWifiData = async () => { @@ -162,33 +60,39 @@ export const DevicesWiFi = () => { }; fetch(`${process.env.NEXT_PUBLIC_REST_ENDPOINT}/device/${router.query.id[0]}/wifi`, requestOptions) - .then(response => response.text()) + .then(response => { + if (response.status === 401) { + router.push("/auth/login") + } + return response.json() + }) .then(result => { - if (result.status === 401){ - router.push("/auth/login") - } - if (result.status === 404){ - //TODO: set device as offline - return - } - stepContentClasses(result) + 
console.log("wifi content", result) + result.map((item) => { + let contentToApply = { + hasChanges: false, + path: item.path, + } + setApplyContent(oldValue => [...oldValue, contentToApply]) + }) + setContent(result) }) .catch(error => console.log('error', error)); }; useEffect(()=>{ - // fetchWifiData() + fetchWifiData() },[]) - return ( content && + return (
- { - content.map((item, index) => { + {content.length > 1 ? + (content.map((item, index) => { return ( { /> - } label="Enabled" /> - { + let enable = e.target.value == 1 ? "1" : "0" + applyContent[index].hasChanges = true + applyContent[index].enable = { + value : enable + } + setApplyContent([...applyContent]) + item.enable.value = enable + }}/>} + label="Enabled" />} + {item.ssid.value != null && + disabled={!item.ssid.writable} + onChange={(e) => { + applyContent[index].hasChanges = true + applyContent[index].ssid = { + value : e.target.value + } + setApplyContent([...applyContent]) + item.ssid.value = e.target.value + }} + />} + {item.securityCapabilities && + />} + {item.password.value != null && + />} + {item.standard.value != null && + } + + theme.zIndex.drawer + 1 }} + open={open} + onClick={handleClose} + > + + + + + + Response + + + { + setAnswer(false); + handleClose; + //setContent(""); + }} + > + + + + + + + + +
+            {content}
+          
+
+
+ + + +
+
+ + ); +}; From 1879a6cd0bb53dc4c87a49cc1bed07d601c9dc12 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Tue, 11 Jun 2024 22:20:46 -0300 Subject: [PATCH 15/52] feat(frontend): cwmp remote messages | closes #266 --- .../src/sections/devices/cwmp/devices-rpc.js | 121 ++++++++++-------- 1 file changed, 66 insertions(+), 55 deletions(-) diff --git a/frontend/src/sections/devices/cwmp/devices-rpc.js b/frontend/src/sections/devices/cwmp/devices-rpc.js index 6da1c33..2fdcad1 100644 --- a/frontend/src/sections/devices/cwmp/devices-rpc.js +++ b/frontend/src/sections/devices/cwmp/devices-rpc.js @@ -38,15 +38,18 @@ const [answer, setAnswer] = useState(false) const [content, setContent] = useState('') const [age, setAge] = useState(2); -const [value, setValue] = useState(`{ - "param_paths": [ - "Device.WiFi.SSID.[Name==wlan0].", - "Device.IP.Interface.*.Alias", - "Device.DeviceInfo.FirmwareImage.*.Alias", - "Device.IP.Interface.1.IPv4Address.1.IPAddress" - ], - "max_depth": 2 -}`) +const [value, setValue] = useState(` + + + + + + InternetGatewayDevice.LANDevice.1.WLANConfiguration.1. + InternetGatewayDevice.LANDevice.1.WLANConfiguration.2. + + + +`) const handleClose = () => { setOpen(false); @@ -103,58 +106,66 @@ const handleOpen = () => { setAge(event.target.value); switch(event.target.value) { case 1: - setValue(`{ - "allow_partial": true, - "create_objs": [ - { - "obj_path": "Device.IP.Interface.", - "param_settings": [ - { - "param": "Alias", - "value": "test", - "required": true - } - ] - } - ] - }`) + setValue(` + + + + + InternetGatewayDevice.LANDevice. + + + + `) break; case 2: - setValue(`{ - "param_paths": [ - "Device.WiFi.SSID.[Name==wlan0].", - "Device.IP.Interface.*.Alias", - "Device.DeviceInfo.FirmwareImage.*.Alias", - "Device.IP.Interface.1.IPv4Address.1.IPAddress" - ], - "max_depth": 2 - }`) + setValue(` + + + + + + InternetGatewayDevice.LANDevice.1.WLANConfiguration.1. + InternetGatewayDevice.LANDevice.1.WLANConfiguration.2. 
+ InternetGatewayDevice.LANDevice.2.WLANConfiguration.2. + InternetGatewayDevice.LANDevice.2.WLANConfiguration.1. + + + + `) break; case 3: setValue(` - { - "allow_partial":true, - "update_objs":[ - { - "obj_path":"Device.IP.Interface.[Alias==pamonha].", - "param_settings":[ - { - "param":"Alias", - "value":"goiaba", - "required":true - } - ] - } - ] - }`) + + + + + + + + InternetGatewayDevice.LANDevice.1.WLANConfiguration.1.Enable + 0 + + + InternetGatewayDevice.LANDevice.1.WLANConfiguration.2.SSID + HUAWEI_TEST-2 + + + LC1309123 + + + `) break; case 4: - setValue(`{ - "allow_partial": true, - "obj_paths": [ - "Device.IP.Interface.3." - ] - }`) + setValue(` + + + + + InternetGatewayDevice.LANDevice.3. + + + + `) break; default: // code block @@ -207,7 +218,7 @@ const handleOpen = () => { onChange={handleChange} value={value} fullWidth - rows="9" + rows="15" />
From b81699903cfa2a5210cbad6615f6f50eb6411a81 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Tue, 11 Jun 2024 22:31:56 -0300 Subject: [PATCH 16/52] feat(frontend): prettify xml + route for cwmp api --- .../src/sections/devices/cwmp/devices-rpc.js | 37 +++++++++++++++---- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/frontend/src/sections/devices/cwmp/devices-rpc.js b/frontend/src/sections/devices/cwmp/devices-rpc.js index 2fdcad1..5406d32 100644 --- a/frontend/src/sections/devices/cwmp/devices-rpc.js +++ b/frontend/src/sections/devices/cwmp/devices-rpc.js @@ -51,13 +51,36 @@ const [value, setValue] = useState(` `) +var prettifyXml = function(sourceXml) +{ + var xmlDoc = new DOMParser().parseFromString(sourceXml, 'application/xml'); + var xsltDoc = new DOMParser().parseFromString([ + // describes how we want to modify the XML - indent everything + '', + ' ', + ' ', // change to just text() to strip space in text nodes + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + ].join('\n'), 'application/xml'); + + var xsltProcessor = new XSLTProcessor(); + xsltProcessor.importStylesheet(xsltDoc); + var resultDoc = xsltProcessor.transformToDocument(xmlDoc); + var resultXml = new XMLSerializer().serializeToString(resultDoc); + return resultXml; +}; + const handleClose = () => { setOpen(false); }; const handleOpen = () => { setOpen(true); var myHeaders = new Headers(); - myHeaders.append("Content-Type", "application/json"); myHeaders.append("Authorization", localStorage.getItem("token")); var raw = value @@ -73,21 +96,21 @@ const handleOpen = () => { switch(age) { case 1: - method="add" + method="addObject" break; case 2: - method="get" + method="getParameterValues" break; case 3: - method="set" + method="setParameterValues" break; case 4: - method="del" + method="deleteObject" break; } - fetch(`${process.env.NEXT_PUBLIC_REST_ENDPOINT}/device/${router.query.id[0]}/any/${method}`, requestOptions) + 
fetch(`${process.env.NEXT_PUBLIC_REST_ENDPOINT}/device/cwmp/${router.query.id[0]}/${method}`, requestOptions) .then(response => response.text()) .then(result => { if (result.status === 401){ @@ -95,7 +118,7 @@ const handleOpen = () => { } setOpen(false) setAnswer(true) - let teste = JSON.stringify(JSON.parse(result), null, 2) + let teste = prettifyXml(result) console.log(teste) setContent(teste) }) From 6ad3e094f6b1e0e760dfe2d663f838bae87f08ae Mon Sep 17 00:00:00 2001 From: leandrofars Date: Thu, 13 Jun 2024 12:12:53 -0300 Subject: [PATCH 17/52] fix(deploy): container name typo --- deploy/compose/docker-compose.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/compose/docker-compose.yaml b/deploy/compose/docker-compose.yaml index 559a094..ed8c17a 100644 --- a/deploy/compose/docker-compose.yaml +++ b/deploy/compose/docker-compose.yaml @@ -19,7 +19,7 @@ services: #/* ------------------------ API REST / USP Controller ----------------------- */ controller: image: 'oktopusp/controller' - container_name: controlller + container_name: controller ports: - 8000:8000 depends_on: From 832b2cae7ec549d80f1c765bfcab5654fdda3c03 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Thu, 13 Jun 2024 12:17:08 -0300 Subject: [PATCH 18/52] fix(deploy): release acs service | close #264 --- build/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/build/Makefile b/build/Makefile index a801e79..c87bf00 100644 --- a/build/Makefile +++ b/build/Makefile @@ -41,6 +41,7 @@ release: release-frontend release-backend release-backend: @make release -C ../backend/services/controller/build/ DOCKER_USER=${DOCKER_USER} + @make release -C ../backend/services/acs/build/ DOCKER_USER=${DOCKER_USER} @make release -C ../backend/services/utils/socketio/build/ DOCKER_USER=${DOCKER_USER} @make release -C ../backend/services/mtp/adapter/build/ DOCKER_USER=${DOCKER_USER} @make release -C ../backend/services/mtp/ws-adapter/build/ DOCKER_USER=${DOCKER_USER} From 
6f9e0b9e92761b71e3c9ac14e8bec3a9699c3328 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Thu, 13 Jun 2024 12:27:49 -0300 Subject: [PATCH 19/52] chore(frontend): update nextjs version --- frontend/package-lock.json | 9961 ++++++++++++++---------------------- frontend/package.json | 8 +- 2 files changed, 3810 insertions(+), 6159 deletions(-) diff --git a/frontend/package-lock.json b/frontend/package-lock.json index bfabd37..ebfd141 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -20,12 +20,12 @@ "apexcharts": "3.37.0", "date-fns": "2.29.3", "formik": "2.2.9", - "next": "13.5.6", + "next": "^14.2.4", "nprogress": "0.2.0", "prop-types": "15.8.1", - "react": "18.2.0", + "react": "^18.3.1", "react-apexcharts": "1.4.0", - "react-dom": "18.2.0", + "react-dom": "^18.3.1", "simple-peer": "^9.11.1", "simplebar-react": "^3.2.1", "socket.io-client": "^4.6.2", @@ -39,660 +39,126 @@ "@types/react": "18.0.28", "@types/react-dom": "18.0.11", "eslint": "8.34.0", - "eslint-config-next": "13.1.6" - } - }, - "node_modules/@ampproject/remapping": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", - "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/cli": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/cli/-/cli-7.22.5.tgz", - "integrity": "sha512-N5d7MjzwsQ2wppwjhrsicVDhJSqF9labEP/swYiHhio4Ca2XjEehpgPmerjnLQl7BPE59BLud0PTWGYwqFl/cQ==", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.17", - "commander": "^4.0.1", - "convert-source-map": "^1.1.0", - "fs-readdir-recursive": "^1.1.0", - "glob": "^7.2.0", - "make-dir": "^2.1.0", - "slash": "^2.0.0" - }, - "bin": { - "babel": "bin/babel.js", - "babel-external-helpers": 
"bin/babel-external-helpers.js" - }, - "engines": { - "node": ">=6.9.0" - }, - "optionalDependencies": { - "@nicolo-ribaudo/chokidar-2": "2.1.8-no-fsevents.3", - "chokidar": "^3.4.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/cli/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@babel/cli/node_modules/slash": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", - "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", - "engines": { - "node": ">=6" + "eslint-config-next": "^14.2.4" } }, "node_modules/@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", "dependencies": { - "@babel/highlight": "^7.22.13", - "chalk": "^2.4.2" + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/code-frame/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": 
"sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/code-frame/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/code-frame/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/supports-color": { - "version": "5.5.0", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz", - "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz", - "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==", - "dependencies": { - "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.22.5", - "@babel/generator": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helpers": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.23.2", - "@babel/types": "^7.22.5", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.2", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@babel/core/node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/@babel/generator": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", - "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.7.tgz", + "integrity": "sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==", "dependencies": { - "@babel/types": "^7.23.0", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", + "@babel/types": "^7.24.7", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^2.5.1" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", - "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.5.tgz", - "integrity": "sha512-m1EP3lVOPptR+2DwD125gziZNcmoNSHGmJROKoy87loWUQyJaVXDgpmruWqDARZSmtYQ+Dl25okU8+qhVzuykw==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz", - 
"integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==", - "dependencies": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "browserslist": "^4.21.3", - "lru-cache": "^5.1.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.5.tgz", - "integrity": "sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-member-expression-to-functions": "^7.22.5", - "@babel/helper-optimise-call-expression": "^7.22.5", - "@babel/helper-replace-supers": 
"^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.5.tgz", - "integrity": "sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "regexpu-core": "^5.3.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.0.tgz", - "integrity": "sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg==", - "dependencies": { - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": 
"^1.14.2", - "semver": "^6.1.2" - }, - "peerDependencies": { - "@babel/core": "^7.4.0-0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", - "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz", + "integrity": "sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==", + "dependencies": { + "@babel/types": "^7.24.7" + }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-function-name": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", - "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz", + "integrity": "sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==", "dependencies": { - "@babel/template": "^7.22.15", - "@babel/types": "^7.23.0" + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": 
"sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz", + "integrity": "sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==", "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.22.5.tgz", - "integrity": "sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ==", - "dependencies": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-imports": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz", - "integrity": "sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz", - "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-module-imports": "^7.22.5", - "@babel/helper-simple-access": "^7.22.5", - 
"@babel/helper-split-export-declaration": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-transforms/node_modules/@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", - "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", - "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.5.tgz", - "integrity": "sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-wrap-function": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - 
"peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.5.tgz", - "integrity": "sha512-aLdNM5I3kdI/V9xGNyKSF3X/gTyMUBohTZ+/3QdQKAA9vxIiy12E+8E2HoOP1/DjeqU+g6as35QHJNMDDYpuCg==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-member-expression-to-functions": "^7.22.5", - "@babel/helper-optimise-call-expression": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", - "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", - "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", - "dependencies": { - "@babel/types": "^7.22.5" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz", + "integrity": "sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==", "dependencies": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", - "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz", + "integrity": "sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz", - "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.5.tgz", - "integrity": "sha512-bYqLIBSEshYcYQyfks8ewYA8S30yaGSeRslcvKMvoUk6HHPySbxHq9YRi6ghhzEU+yhQv9bP/jXnygkStOcqZw==", - "dependencies": { - "@babel/helper-function-name": "^7.22.5", - "@babel/template": "^7.22.5", - 
"@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz", - "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==", - "dependencies": { - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - }, + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", - "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", "dependencies": { - "@babel/helper-validator-identifier": "^7.22.20", + "@babel/helper-validator-identifier": "^7.24.7", "chalk": "^2.4.2", - "js-tokens": "^4.0.0" + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, 
"node_modules/@babel/parser": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", - "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz", + "integrity": "sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==", "bin": { "parser": "bin/babel-parser.js" }, @@ -700,1406 +166,57 @@ "node": ">=6.0.0" } }, - "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.5.tgz", - "integrity": "sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.5.tgz", - "integrity": "sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/plugin-transform-optional-chaining": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.13.0" - } - }, - "node_modules/@babel/plugin-external-helpers": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-external-helpers/-/plugin-external-helpers-7.22.5.tgz", - "integrity": "sha512-ngnNEWxmykPk82mH4ajZT0qTztr3Je6hrMuKAslZVM8G1YZTENJSYwrIGtt6KOtznug3exmAtF4so/nPqJuA4A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", - "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-class-properties instead.", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz", - "integrity": "sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-object-rest-spread instead.", - "dependencies": { - "@babel/compat-data": "^7.20.5", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0-placeholder-for-preset-env.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", - "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", - "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", - "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-unicode-property-regex instead.", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", 
- "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz", - "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-attributes": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz", - "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-meta": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", - "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - 
} - }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", - "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": 
"sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", - "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-unicode-sets-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", - "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz", - 
"integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-generator-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.5.tgz", - "integrity": "sha512-gGOEvFzm3fWoyD5uZq7vVTD57pPJ3PczPUD/xCFGjzBpUosnklmXyKnGQbbbGs1NPNPskFex0j93yKbHt0cHyg==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-remap-async-to-generator": "^7.22.5", - "@babel/plugin-syntax-async-generators": "^7.8.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz", - "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==", - "dependencies": { - "@babel/helper-module-imports": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-remap-async-to-generator": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator/node_modules/@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": 
">=6.9.0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz", - "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.22.5.tgz", - "integrity": "sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-properties": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz", - "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-static-block": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.5.tgz", - "integrity": "sha512-SPToJ5eYZLxlnp1UzdARpOGeC2GbHvr9d/UV0EukuVx8atktg194oe+C5BqQ8jRTkgLRVOPYeXRSBg1IlMoVRA==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.22.5", - 
"@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.5.tgz", - "integrity": "sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-optimise-call-expression": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-replace-supers": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-classes/node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz", - "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/template": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.22.5", - 
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.22.5.tgz", - "integrity": "sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz", - "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz", - "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dynamic-import": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.5.tgz", - "integrity": "sha512-0MC3ppTB1AMxd8fXjSrbPa7LT9hrImt+/fcj+Pg5YMD7UQyWp/02+JWpdnCymmsXwIx5Z+sYn1bwCn4ZJNvhqQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz", - "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-export-namespace-from": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.5.tgz", - "integrity": "sha512-X4hhm7FRnPgd4nDA4b/5V280xCx6oL7Oob5+9qVS5C13Zq4bh1qq7LU0GgRU6b5dBWBvhGaXYVB4AcN6+ol6vg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.5.tgz", - "integrity": "sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz", - "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==", - "dependencies": { - 
"@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-json-strings": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.5.tgz", - "integrity": "sha512-DuCRB7fu8MyTLbEQd1ew3R85nx/88yMoqo2uPSjevMj3yoN7CDM8jkgrY0wmVxfJZyJ/B9fE1iq7EQppWQmR5A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-json-strings": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-literals": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz", - "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-logical-assignment-operators": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.5.tgz", - "integrity": "sha512-MQQOUW1KL8X0cDWfbwYP+TbVbZm16QmQXJQ+vndPtH/BoO0lOKpVoEDMI7+PskYxH+IiE0tS8xZye0qr1lGzSA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz", - "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.22.5.tgz", - "integrity": "sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==", - "dependencies": { - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.22.5.tgz", - "integrity": "sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==", - "dependencies": { - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-simple-access": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.22.5.tgz", - "integrity": "sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==", - "dependencies": { - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-module-transforms": "^7.22.5", - 
"@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz", - "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==", - "dependencies": { - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", - "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz", - "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.5.tgz", - "integrity": "sha512-6CF8g6z1dNYZ/VXok5uYkkBBICHZPiGEl7oDnAx2Mt1hlHVHOSIKWJaXHjQJA5VB43KZnXZDIexMchY4y2PGdA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-numeric-separator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.5.tgz", - "integrity": "sha512-NbslED1/6M+sXiwwtcAB/nieypGw02Ejf4KtDeMkCEpP6gWFMX1wI9WKYua+4oBneCCEmulOkRpwywypVZzs/g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-rest-spread": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.5.tgz", - "integrity": "sha512-Kk3lyDmEslH9DnvCDA1s1kkd3YWQITiBOHngOtDL9Pt6BZjzqb6hiOlb8VfjiiQJ2unmegBqZu0rx5RxJb5vmQ==", - "dependencies": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz", - "integrity": 
"sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-replace-supers": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-optional-catch-binding": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.5.tgz", - "integrity": "sha512-pH8orJahy+hzZje5b8e2QIlBWQvGpelS76C63Z+jhZKsmzfNaPQ+LaW6dcJ9bxTpo1mtXbgHwy765Ro3jftmUg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-optional-chaining": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.22.5.tgz", - "integrity": "sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.5.tgz", - "integrity": "sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-transform-private-methods": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz", - "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-private-property-in-object": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.5.tgz", - "integrity": "sha512-/9xnaTTJcVoBtSSmrVyhtSvO3kbqS2ODoh2juEU72c3aYonNF0OMGiaz2gjukyKM2wBBYJP38S4JiE0Wfb5VMQ==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz", - "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-display-name": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz", - "integrity": 
"sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.5.tgz", - "integrity": "sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-module-imports": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-jsx": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-development": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", - "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", - "dependencies": { - "@babel/plugin-transform-react-jsx": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx/node_modules/@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/plugin-transform-react-pure-annotations": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz", - "integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.5.tgz", - "integrity": "sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "regenerator-transform": "^0.15.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz", - "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz", - "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-transform-spread": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz", - "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz", - "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz", - "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz", - "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" 
- } - }, - "node_modules/@babel/plugin-transform-typescript": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.5.tgz", - "integrity": "sha512-SMubA9S7Cb5sGSFFUlqxyClTA9zWJ8qGQrppNUm05LtFuN1ELRFNndkix4zUJrC9F+YivWwa1dHMSyo0e0N9dA==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-typescript": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.5.tgz", - "integrity": "sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-property-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz", - "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz", - "integrity": 
"sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-sets-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz", - "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.5.tgz", - "integrity": "sha512-fj06hw89dpiZzGZtxn+QybifF07nNiZjZ7sazs2aVDcysAZVGjW7+7iFYxg6GLNM47R/thYfLdrXc+2f11Vi9A==", - "dependencies": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.5", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.5", - "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.22.5", - "@babel/plugin-syntax-import-attributes": "^7.22.5", - 
"@babel/plugin-syntax-import-meta": "^7.10.4", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", - "@babel/plugin-transform-arrow-functions": "^7.22.5", - "@babel/plugin-transform-async-generator-functions": "^7.22.5", - "@babel/plugin-transform-async-to-generator": "^7.22.5", - "@babel/plugin-transform-block-scoped-functions": "^7.22.5", - "@babel/plugin-transform-block-scoping": "^7.22.5", - "@babel/plugin-transform-class-properties": "^7.22.5", - "@babel/plugin-transform-class-static-block": "^7.22.5", - "@babel/plugin-transform-classes": "^7.22.5", - "@babel/plugin-transform-computed-properties": "^7.22.5", - "@babel/plugin-transform-destructuring": "^7.22.5", - "@babel/plugin-transform-dotall-regex": "^7.22.5", - "@babel/plugin-transform-duplicate-keys": "^7.22.5", - "@babel/plugin-transform-dynamic-import": "^7.22.5", - "@babel/plugin-transform-exponentiation-operator": "^7.22.5", - "@babel/plugin-transform-export-namespace-from": "^7.22.5", - "@babel/plugin-transform-for-of": "^7.22.5", - "@babel/plugin-transform-function-name": "^7.22.5", - "@babel/plugin-transform-json-strings": "^7.22.5", - "@babel/plugin-transform-literals": "^7.22.5", - "@babel/plugin-transform-logical-assignment-operators": "^7.22.5", - "@babel/plugin-transform-member-expression-literals": "^7.22.5", - "@babel/plugin-transform-modules-amd": "^7.22.5", - "@babel/plugin-transform-modules-commonjs": "^7.22.5", - "@babel/plugin-transform-modules-systemjs": "^7.22.5", - 
"@babel/plugin-transform-modules-umd": "^7.22.5", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", - "@babel/plugin-transform-new-target": "^7.22.5", - "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.5", - "@babel/plugin-transform-numeric-separator": "^7.22.5", - "@babel/plugin-transform-object-rest-spread": "^7.22.5", - "@babel/plugin-transform-object-super": "^7.22.5", - "@babel/plugin-transform-optional-catch-binding": "^7.22.5", - "@babel/plugin-transform-optional-chaining": "^7.22.5", - "@babel/plugin-transform-parameters": "^7.22.5", - "@babel/plugin-transform-private-methods": "^7.22.5", - "@babel/plugin-transform-private-property-in-object": "^7.22.5", - "@babel/plugin-transform-property-literals": "^7.22.5", - "@babel/plugin-transform-regenerator": "^7.22.5", - "@babel/plugin-transform-reserved-words": "^7.22.5", - "@babel/plugin-transform-shorthand-properties": "^7.22.5", - "@babel/plugin-transform-spread": "^7.22.5", - "@babel/plugin-transform-sticky-regex": "^7.22.5", - "@babel/plugin-transform-template-literals": "^7.22.5", - "@babel/plugin-transform-typeof-symbol": "^7.22.5", - "@babel/plugin-transform-unicode-escapes": "^7.22.5", - "@babel/plugin-transform-unicode-property-regex": "^7.22.5", - "@babel/plugin-transform-unicode-regex": "^7.22.5", - "@babel/plugin-transform-unicode-sets-regex": "^7.22.5", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.22.5", - "babel-plugin-polyfill-corejs2": "^0.4.3", - "babel-plugin-polyfill-corejs3": "^0.8.1", - "babel-plugin-polyfill-regenerator": "^0.5.0", - "core-js-compat": "^3.30.2", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", 
- "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", - "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-react": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.5.tgz", - "integrity": "sha512-M+Is3WikOpEJHgR385HbuCITPTaPRaNkibTEa9oiofmJvIsrceb4yp9RL9Kb+TE8LznmeyZqpP+Lopwcx59xPQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "@babel/plugin-transform-react-display-name": "^7.22.5", - "@babel/plugin-transform-react-jsx": "^7.22.5", - "@babel/plugin-transform-react-jsx-development": "^7.22.5", - "@babel/plugin-transform-react-pure-annotations": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-typescript": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.22.5.tgz", - "integrity": "sha512-YbPaal9LxztSGhmndR46FmAbkJ/1fAsw293tSU+I5E5h+cnJ3d4GTwyUgGYmOXJYdGA+uNePle4qbaRzj2NISQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "@babel/plugin-syntax-jsx": "^7.22.5", - "@babel/plugin-transform-modules-commonjs": "^7.22.5", - "@babel/plugin-transform-typescript": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/regjsgen": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", - "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" - }, "node_modules/@babel/runtime": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.0.tgz", - "integrity": "sha512-xwII0//EObnq89Ji5AKYQaRYiW/nZ3llSv29d49IuxPhKbtJoLP+9QUUZ4nVragQVtaVGeZrpB+ZtG/Pdy/POw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz", + "integrity": "sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==", "dependencies": { - "regenerator-runtime": "^0.13.11" + "regenerator-runtime": "^0.14.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/template": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", - "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.7.tgz", + "integrity": "sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==", "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15" + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", - "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz", + "integrity": 
"sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==", "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.0", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.0", - "@babel/types": "^7.23.0", - "debug": "^4.1.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7", + "debug": "^4.3.1", "globals": "^11.1.0" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/traverse/node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/types": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", - "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz", + "integrity": "sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==", "dependencies": { - "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.20", + "@babel/helper-string-parser": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", "to-fast-properties": "^2.0.0" }, "engines": { @@ -2107,16 +224,16 @@ } }, "node_modules/@date-io/core": { - "version": "2.16.0", - "resolved": 
"https://registry.npmjs.org/@date-io/core/-/core-2.16.0.tgz", - "integrity": "sha512-DYmSzkr+jToahwWrsiRA2/pzMEtz9Bq1euJwoOuYwuwIYXnZFtHajY2E6a1VNVDc9jP8YUXK1BvnZH9mmT19Zg==" + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/core/-/core-2.17.0.tgz", + "integrity": "sha512-+EQE8xZhRM/hsY0CDTVyayMDDY5ihc4MqXCrPxooKw19yAzUIC6uUqsZeaOFNL9YKTNxYKrJP5DFgE8o5xRCOw==" }, "node_modules/@date-io/date-fns": { - "version": "2.16.0", - "resolved": "https://registry.npmjs.org/@date-io/date-fns/-/date-fns-2.16.0.tgz", - "integrity": "sha512-bfm5FJjucqlrnQcXDVU5RD+nlGmL3iWgkHTq3uAZWVIuBu6dDmGa3m8a6zo2VQQpu8ambq9H22UyUpn7590joA==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/date-fns/-/date-fns-2.17.0.tgz", + "integrity": "sha512-L0hWZ/mTpy3Gx/xXJ5tq5CzHo0L7ry6KEO9/w/JWiFWFLZgiNVo3ex92gOl3zmzjHqY/3Ev+5sehAr8UnGLEng==", "dependencies": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" }, "peerDependencies": { "date-fns": "^2.0.0" @@ -2128,11 +245,11 @@ } }, "node_modules/@date-io/dayjs": { - "version": "2.16.0", - "resolved": "https://registry.npmjs.org/@date-io/dayjs/-/dayjs-2.16.0.tgz", - "integrity": "sha512-y5qKyX2j/HG3zMvIxTobYZRGnd1FUW2olZLS0vTj7bEkBQkjd2RO7/FEwDY03Z1geVGlXKnzIATEVBVaGzV4Iw==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/dayjs/-/dayjs-2.17.0.tgz", + "integrity": "sha512-Iq1wjY5XzBh0lheFA0it6Dsyv94e8mTiNR8vuTai+KopxDkreL3YjwTmZHxkgB7/vd0RMIACStzVgWvPATnDCA==", "dependencies": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" }, "peerDependencies": { "dayjs": "^1.8.17" @@ -2144,11 +261,11 @@ } }, "node_modules/@date-io/luxon": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/@date-io/luxon/-/luxon-2.16.1.tgz", - "integrity": "sha512-aeYp5K9PSHV28946pC+9UKUi/xMMYoaGelrpDibZSgHu2VWHXrr7zWLEr+pMPThSs5vt8Ei365PO+84pCm37WQ==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/luxon/-/luxon-2.17.0.tgz", + "integrity": 
"sha512-l712Vdm/uTddD2XWt9TlQloZUiTiRQtY5TCOG45MQ/8u0tu8M17BD6QYHar/3OrnkGybALAMPzCy1r5D7+0HBg==", "dependencies": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" }, "peerDependencies": { "luxon": "^1.21.3 || ^2.x || ^3.x" @@ -2160,11 +277,11 @@ } }, "node_modules/@date-io/moment": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/@date-io/moment/-/moment-2.16.1.tgz", - "integrity": "sha512-JkxldQxUqZBfZtsaCcCMkm/dmytdyq5pS1RxshCQ4fHhsvP5A7gSqPD22QbVXMcJydi3d3v1Y8BQdUKEuGACZQ==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/moment/-/moment-2.17.0.tgz", + "integrity": "sha512-e4nb4CDZU4k0WRVhz1Wvl7d+hFsedObSauDHKtZwU9kt7gdYEAzKgnrSCTHsEaXrDumdrkCYTeZ0Tmyk7uV4tw==", "dependencies": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" }, "peerDependencies": { "moment": "^2.24.0" @@ -2176,23 +293,28 @@ } }, "node_modules/@emotion/babel-plugin": { - "version": "11.10.6", - "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.10.6.tgz", - "integrity": "sha512-p2dAqtVrkhSa7xz1u/m9eHYdLi+en8NowrmXeF/dKtJpU8lCWli8RUAati7NcSl0afsBott48pdnANuD0wh9QQ==", + "version": "11.11.0", + "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.11.0.tgz", + "integrity": "sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==", "dependencies": { "@babel/helper-module-imports": "^7.16.7", "@babel/runtime": "^7.18.3", - "@emotion/hash": "^0.9.0", - "@emotion/memoize": "^0.8.0", - "@emotion/serialize": "^1.1.1", + "@emotion/hash": "^0.9.1", + "@emotion/memoize": "^0.8.1", + "@emotion/serialize": "^1.1.2", "babel-plugin-macros": "^3.1.0", "convert-source-map": "^1.5.0", "escape-string-regexp": "^4.0.0", "find-root": "^1.1.0", "source-map": "^0.5.7", - "stylis": "4.1.3" + "stylis": "4.2.0" } }, + "node_modules/@emotion/babel-plugin/node_modules/stylis": { + "version": "4.2.0", + "resolved": 
"https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==" + }, "node_modules/@emotion/cache": { "version": "11.10.5", "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.10.5.tgz", @@ -2206,22 +328,22 @@ } }, "node_modules/@emotion/hash": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.0.tgz", - "integrity": "sha512-14FtKiHhy2QoPIzdTcvh//8OyBlknNs2nXRwIhG904opCby3l+9Xaf/wuPvICBF0rc1ZCNBd3nKe9cd2mecVkQ==" + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.1.tgz", + "integrity": "sha512-gJB6HLm5rYwSLI6PQa+X1t5CFGrv1J1TWG+sOyMCeKz2ojaj6Fnl/rZEspogG+cvqbt4AE/2eIyD2QfLKTBNlQ==" }, "node_modules/@emotion/is-prop-valid": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.2.0.tgz", - "integrity": "sha512-3aDpDprjM0AwaxGE09bOPkNxHpBd+kA6jty3RnaEXdweX1DF1U3VQpPYb0g1IStAuK7SVQ1cy+bNBBKp4W3Fjg==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.2.2.tgz", + "integrity": "sha512-uNsoYd37AFmaCdXlg6EYD1KaPOaRWRByMCYzbKUX4+hhMfrxdVSelShywL4JVaAeM/eHUOSprYBQls+/neX3pw==", "dependencies": { - "@emotion/memoize": "^0.8.0" + "@emotion/memoize": "^0.8.1" } }, "node_modules/@emotion/memoize": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.0.tgz", - "integrity": "sha512-G/YwXTkv7Den9mXDO7AhLWkE3q+I92B+VqAE+dYG4NGPaHZGvt3G8Q0p9vmE+sq7rTGphUbAvmQ9YpbfMQGGlA==" + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.1.tgz", + "integrity": "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==" }, "node_modules/@emotion/react": { "version": "11.10.6", @@ -2247,14 +369,14 @@ } }, "node_modules/@emotion/serialize": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/@emotion/serialize/-/serialize-1.1.1.tgz", - "integrity": "sha512-Zl/0LFggN7+L1liljxXdsVSVlg6E/Z/olVWpfxUTxOAmi8NU7YoeWeLfi1RmnB2TATHoaWwIBRoL+FvAJiTUQA==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.1.4.tgz", + "integrity": "sha512-RIN04MBT8g+FnDwgvIUi8czvr1LU1alUMI05LekWB5DGyTm8cCBMCRpq3GqaiyEDRptEXOyXnvZ58GZYu4kBxQ==", "dependencies": { - "@emotion/hash": "^0.9.0", - "@emotion/memoize": "^0.8.0", - "@emotion/unitless": "^0.8.0", - "@emotion/utils": "^1.2.0", + "@emotion/hash": "^0.9.1", + "@emotion/memoize": "^0.8.1", + "@emotion/unitless": "^0.8.1", + "@emotion/utils": "^1.2.1", "csstype": "^3.0.2" } }, @@ -2278,9 +400,9 @@ } }, "node_modules/@emotion/sheet": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.2.1.tgz", - "integrity": "sha512-zxRBwl93sHMsOj4zs+OslQKg/uhF38MB+OMKoCrVuS0nyTkqnau+BM3WGEoOptg9Oz45T/aIGs1qbVAsEFo3nA==" + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.2.2.tgz", + "integrity": "sha512-0QBtGvaqtWi+nx6doRwDdBIzhNdZrXUppvTM4dtZZWEGTXL/XE/yJxLMGlDT1Gt+UHH5IX1n+jkXyytE/av7OA==" }, "node_modules/@emotion/styled": { "version": "11.10.6", @@ -2305,27 +427,27 @@ } }, "node_modules/@emotion/unitless": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.8.0.tgz", - "integrity": "sha512-VINS5vEYAscRl2ZUDiT3uMPlrFQupiKgHz5AA4bCH1miKBg4qtwkim1qPmJj/4WG6TreYMY111rEFsjupcOKHw==" + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.8.1.tgz", + "integrity": "sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==" }, "node_modules/@emotion/use-insertion-effect-with-fallbacks": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.0.tgz", - "integrity": 
"sha512-1eEgUGmkaljiBnRMTdksDV1W4kUnmwgp7X9G8B++9GYwl1lUdqSndSriIrTJ0N7LQaoauY9JJ2yhiOYK5+NI4A==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.1.tgz", + "integrity": "sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw==", "peerDependencies": { "react": ">=16.8.0" } }, "node_modules/@emotion/utils": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.2.0.tgz", - "integrity": "sha512-sn3WH53Kzpw8oQ5mgMmIzzyAaH2ZqFEbozVVBSYp538E06OSE6ytOp7pRAjNQR+Q/orwqdQYJSe2m3hCOeznkw==" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.2.1.tgz", + "integrity": "sha512-Y2tGf3I+XVnajdItskUCn6LX+VUDmP6lTL4fcqsXAv43dnlbZiuW4MWQW38rW/BVWSE7Q/7+XQocmpnRYILUmg==" }, "node_modules/@emotion/weak-memoize": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.3.0.tgz", - "integrity": "sha512-AHPmaAx+RYfZz0eYu6Gviiagpmiyw98ySSlQvCUhVGDRtDFe4DBS0x1bSjdF3gqUDYOczB+yYvBTtEylYSdRhg==" + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.3.1.tgz", + "integrity": "sha512-EsBwpc7hBUJWAsNPBmJy4hxWx12v6bshQsldrVmjxJoc3isbxhOrF2IcCpaXxfvq03NwkI7sbsOLXbYuqF/8Ww==" }, "node_modules/@eslint/eslintrc": { "version": "1.4.1", @@ -2350,6 +472,33 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/@eslint/eslintrc/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@heroicons/react": { "version": "2.0.16", "resolved": "https://registry.npmjs.org/@heroicons/react/-/react-2.0.16.tgz", @@ -2359,13 +508,14 @@ } }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.8", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", - "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "deprecated": "Use @eslint/config-array instead", "dev": true, "dependencies": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", "minimatch": "^3.0.5" }, "engines": { @@ -2386,52 +536,97 @@ } }, "node_modules/@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", "dev": true }, - "node_modules/@jridgewell/gen-mapping": { - 
"version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", 
"@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", - "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.18", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", - "integrity": 
"sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dependencies": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, "node_modules/@mui/base": { @@ -2467,12 +662,12 @@ } }, "node_modules/@mui/core-downloads-tracker": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-5.11.9.tgz", - "integrity": "sha512-YGEtucQ/Nl91VZkzYaLad47Cdui51n/hW+OQm4210g4N3/nZzBxmGeKfubEalf+ShKH4aYDS86XTO6q/TpZnjQ==", + "version": "5.15.20", + "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-5.15.20.tgz", + "integrity": "sha512-DoL2ppgldL16utL8nNyj/P12f8mCNdx/Hb/AJnX9rLY4b52hCMIx1kH83pbXQ6uMy6n54M3StmEbvSGoj2OFuA==", "funding": { "type": "opencollective", - "url": "https://opencollective.com/mui" + "url": "https://opencollective.com/mui-org" } }, "node_modules/@mui/lab": { @@ -2561,12 +756,12 @@ } }, "node_modules/@mui/private-theming": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/private-theming/-/private-theming-5.11.9.tgz", - "integrity": "sha512-XMyVIFGomVCmCm92EvYlgq3zrC9K+J6r7IKl/rBJT2/xVYoRY6uM7jeB+Wxh7kXxnW9Dbqsr2yL3cx6wSD1sAg==", + "version": "5.15.20", + "resolved": "https://registry.npmjs.org/@mui/private-theming/-/private-theming-5.15.20.tgz", + "integrity": "sha512-BK8F94AIqSrnaPYXf2KAOjGZJgWfvqAVQ2gVR3EryvQFtuBnG6RwodxrCvd3B48VuMy6Wsk897+lQMUxJyk+6g==", "dependencies": { - "@babel/runtime": "^7.20.13", - "@mui/utils": "^5.11.9", + "@babel/runtime": "^7.23.9", + "@mui/utils": "^5.15.20", "prop-types": "^15.8.1" }, "engines": { @@ -2574,7 +769,7 @@ 
}, "funding": { "type": "opencollective", - "url": "https://opencollective.com/mui" + "url": "https://opencollective.com/mui-org" }, "peerDependencies": { "@types/react": "^17.0.0 || ^18.0.0", @@ -2587,13 +782,13 @@ } }, "node_modules/@mui/styled-engine": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-5.11.9.tgz", - "integrity": "sha512-bkh2CjHKOMy98HyOc8wQXEZvhOmDa/bhxMUekFX5IG0/w4f5HJ8R6+K6nakUUYNEgjOWPYzNPrvGB8EcGbhahQ==", + "version": "5.15.14", + "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-5.15.14.tgz", + "integrity": "sha512-RILkuVD8gY6PvjZjqnWhz8fu68dVkqhM5+jYWfB5yhlSQKg+2rHkmEwm75XIeAqI3qwOndK6zELK5H6Zxn4NHw==", "dependencies": { - "@babel/runtime": "^7.20.13", - "@emotion/cache": "^11.10.5", - "csstype": "^3.1.1", + "@babel/runtime": "^7.23.9", + "@emotion/cache": "^11.11.0", + "csstype": "^3.1.3", "prop-types": "^15.8.1" }, "engines": { @@ -2601,7 +796,7 @@ }, "funding": { "type": "opencollective", - "url": "https://opencollective.com/mui" + "url": "https://opencollective.com/mui-org" }, "peerDependencies": { "@emotion/react": "^11.4.1", @@ -2617,6 +812,23 @@ } } }, + "node_modules/@mui/styled-engine/node_modules/@emotion/cache": { + "version": "11.11.0", + "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.11.0.tgz", + "integrity": "sha512-P34z9ssTCBi3e9EI1ZsWpNHcfY1r09ZO0rZbRO2ob3ZQMnFI35jB536qoXbkdesr5EUhYi22anuEJuyxifaqAQ==", + "dependencies": { + "@emotion/memoize": "^0.8.1", + "@emotion/sheet": "^1.2.2", + "@emotion/utils": "^1.2.1", + "@emotion/weak-memoize": "^0.3.1", + "stylis": "4.2.0" + } + }, + "node_modules/@mui/styled-engine/node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==" + }, "node_modules/@mui/system": { "version": "5.11.9", "resolved": 
"https://registry.npmjs.org/@mui/system/-/system-5.11.9.tgz", @@ -2657,11 +869,11 @@ } }, "node_modules/@mui/types": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/@mui/types/-/types-7.2.3.tgz", - "integrity": "sha512-tZ+CQggbe9Ol7e/Fs5RcKwg/woU+o8DCtOnccX6KmbBc7YrfqMYEYuaIcXHuhpT880QwNkZZ3wQwvtlDFA2yOw==", + "version": "7.2.14", + "resolved": "https://registry.npmjs.org/@mui/types/-/types-7.2.14.tgz", + "integrity": "sha512-MZsBZ4q4HfzBsywtXgM1Ksj6HDThtiwmOKUXH1pKYISI9gAVXCNHNpo7TlGoGrBaYWZTdNoirIN7JsQcQUjmQQ==", "peerDependencies": { - "@types/react": "*" + "@types/react": "^17.0.0 || ^18.0.0" }, "peerDependenciesMeta": { "@types/react": { @@ -2670,13 +882,12 @@ } }, "node_modules/@mui/utils": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-5.11.9.tgz", - "integrity": "sha512-eOJaqzcEs4qEwolcvFAmXGpln+uvouvOS9FUX6Wkrte+4I8rZbjODOBDVNlK+V6/ziTfD4iNKC0G+KfOTApbqg==", + "version": "5.15.20", + "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-5.15.20.tgz", + "integrity": "sha512-mAbYx0sovrnpAu1zHc3MDIhPqL8RPVC5W5xcO1b7PiSCJPtckIZmBkp8hefamAvUiAV8gpfMOM6Zb+eSisbI2A==", "dependencies": { - "@babel/runtime": "^7.20.13", - "@types/prop-types": "^15.7.5", - "@types/react-is": "^16.7.1 || ^17.0.0", + "@babel/runtime": "^7.23.9", + "@types/prop-types": "^15.7.11", "prop-types": "^15.8.1", "react-is": "^18.2.0" }, @@ -2685,10 +896,16 @@ }, "funding": { "type": "opencollective", - "url": "https://opencollective.com/mui" + "url": "https://opencollective.com/mui-org" }, "peerDependencies": { + "@types/react": "^17.0.0 || ^18.0.0", "react": "^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } } }, "node_modules/@mui/x-date-pickers": { @@ -2750,23 +967,69 @@ } }, "node_modules/@next/env": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/env/-/env-13.5.6.tgz", - "integrity": 
"sha512-Yac/bV5sBGkkEXmAX5FWPS9Mmo2rthrOPRQQNfycJPkjUAUclomCPH7QFVCDQ4Mp2k2K1SSM6m0zrxYrOwtFQw==" + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.4.tgz", + "integrity": "sha512-3EtkY5VDkuV2+lNmKlbkibIJxcO4oIHEhBWne6PaAp+76J9KoSsGvNikp6ivzAT8dhhBMYrm6op2pS1ApG0Hzg==" }, "node_modules/@next/eslint-plugin-next": { - "version": "13.1.6", - "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-13.1.6.tgz", - "integrity": "sha512-o7cauUYsXjzSJkay8wKjpKJf2uLzlggCsGUkPu3lP09Pv97jYlekTC20KJrjQKmSv5DXV0R/uks2ZXhqjNkqAw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.4.tgz", + "integrity": "sha512-svSFxW9f3xDaZA3idQmlFw7SusOuWTpDTAeBlO3AEPDltrraV+lqs7mAc6A27YdnpQVVIA3sODqUAAHdWhVWsA==", "dev": true, "dependencies": { - "glob": "7.1.7" + "glob": "10.3.10" + } + }, + "node_modules/@next/eslint-plugin-next/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@next/eslint-plugin-next/node_modules/glob": { + "version": "10.3.10", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", + "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", + "dev": true, + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.5", + "minimatch": "^9.0.1", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", + "path-scurry": "^1.10.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@next/eslint-plugin-next/node_modules/minimatch": { + "version": "9.0.4", + 
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz", + "integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/@next/swc-darwin-arm64": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.5.6.tgz", - "integrity": "sha512-5nvXMzKtZfvcu4BhtV0KH1oGv4XEW+B+jOfmBdpFI3C7FrB/MfujRpWYSBBO64+qbW8pkZiSyQv9eiwnn5VIQA==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.4.tgz", + "integrity": "sha512-AH3mO4JlFUqsYcwFUHb1wAKlebHU/Hv2u2kb1pAuRanDZ7pD/A/KPD98RHZmwsJpdHQwfEc/06mgpSzwrJYnNg==", "cpu": [ "arm64" ], @@ -2779,9 +1042,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.5.6.tgz", - "integrity": "sha512-6cgBfxg98oOCSr4BckWjLLgiVwlL3vlLj8hXg2b+nDgm4bC/qVXXLfpLB9FHdoDu4057hzywbxKvmYGmi7yUzA==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.4.tgz", + "integrity": "sha512-QVadW73sWIO6E2VroyUjuAxhWLZWEpiFqHdZdoQ/AMpN9YWGuHV8t2rChr0ahy+irKX5mlDU7OY68k3n4tAZTg==", "cpu": [ "x64" ], @@ -2794,9 +1057,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.5.6.tgz", - "integrity": "sha512-txagBbj1e1w47YQjcKgSU4rRVQ7uF29YpnlHV5xuVUsgCUf2FmyfJ3CPjZUvpIeXCJAoMCFAoGnbtX86BK7+sg==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.4.tgz", + "integrity": "sha512-KT6GUrb3oyCfcfJ+WliXuJnD6pCpZiosx2X3k66HLR+DMoilRb76LpWPGb4tZprawTtcnyrv75ElD6VncVamUQ==", 
"cpu": [ "arm64" ], @@ -2809,9 +1072,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.5.6.tgz", - "integrity": "sha512-cGd+H8amifT86ZldVJtAKDxUqeFyLWW+v2NlBULnLAdWsiuuN8TuhVBt8ZNpCqcAuoruoSWynvMWixTFcroq+Q==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.4.tgz", + "integrity": "sha512-Alv8/XGSs/ytwQcbCHwze1HmiIkIVhDHYLjczSVrf0Wi2MvKn/blt7+S6FJitj3yTlMwMxII1gIJ9WepI4aZ/A==", "cpu": [ "arm64" ], @@ -2824,9 +1087,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.5.6.tgz", - "integrity": "sha512-Mc2b4xiIWKXIhBy2NBTwOxGD3nHLmq4keFk+d4/WL5fMsB8XdJRdtUlL87SqVCTSaf1BRuQQf1HvXZcy+rq3Nw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.4.tgz", + "integrity": "sha512-ze0ShQDBPCqxLImzw4sCdfnB3lRmN3qGMB2GWDRlq5Wqy4G36pxtNOo2usu/Nm9+V2Rh/QQnrRc2l94kYFXO6Q==", "cpu": [ "x64" ], @@ -2839,9 +1102,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.5.6.tgz", - "integrity": "sha512-CFHvP9Qz98NruJiUnCe61O6GveKKHpJLloXbDSWRhqhkJdZD2zU5hG+gtVJR//tyW897izuHpM6Gtf6+sNgJPQ==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.4.tgz", + "integrity": "sha512-8dwC0UJoc6fC7PX70csdaznVMNr16hQrTDAMPvLPloazlcaWfdPogq+UpZX6Drqb1OBlwowz8iG7WR0Tzk/diQ==", "cpu": [ "x64" ], @@ -2854,9 +1117,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.5.6.tgz", - "integrity": 
"sha512-aFv1ejfkbS7PUa1qVPwzDHjQWQtknzAZWGTKYIAaS4NMtBlk3VyA6AYn593pqNanlicewqyl2jUhQAaFV/qXsg==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.4.tgz", + "integrity": "sha512-jxyg67NbEWkDyvM+O8UDbPAyYRZqGLQDTPwvrBBeOSyVWW/jFQkQKQ70JDqDSYg1ZDdl+E3nkbFbq8xM8E9x8A==", "cpu": [ "arm64" ], @@ -2869,9 +1132,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.5.6.tgz", - "integrity": "sha512-XqqpHgEIlBHvzwG8sp/JXMFkLAfGLqkbVsyN+/Ih1mR8INb6YCc2x/Mbwi6hsAgUnqQztz8cvEbHJUbSl7RHDg==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.4.tgz", + "integrity": "sha512-twrmN753hjXRdcrZmZttb/m5xaCBFa48Dt3FbeEItpJArxriYDunWxJn+QFXdJ3hPkm4u7CKxncVvnmgQMY1ag==", "cpu": [ "ia32" ], @@ -2884,9 +1147,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.5.6.tgz", - "integrity": "sha512-Cqfe1YmOS7k+5mGu92nl5ULkzpKuxJrP3+4AEuPmrpFZ3BHxTY3TnHmU1On3bFmFFs6FbTcdF58CCUProGpIGQ==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.4.tgz", + "integrity": "sha512-tkLrjBzqFTP8DVrAAQmZelEahfR9OxWpFR++vAI9FBhCiIxtwHwBHC23SBHCTURBtwB4kc/x44imVOnkKGNVGg==", "cpu": [ "x64" ], @@ -2898,12 +1161,6 @@ "node": ">= 10" } }, - "node_modules/@nicolo-ribaudo/chokidar-2": { - "version": "2.1.8-no-fsevents.3", - "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz", - "integrity": "sha512-s88O1aVtXftvp5bCPB7WnmXc5IwOZZ7YPuwNPt+GtOOXpPvad1LfbmjYv+qII7zP6RU2QGnqve27dnLycEnyEQ==", - "optional": true - }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": 
"https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -2939,64 +1196,54 @@ "node": ">= 8" } }, - "node_modules/@pkgr/utils": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@pkgr/utils/-/utils-2.3.1.tgz", - "integrity": "sha512-wfzX8kc1PMyUILA+1Z/EqoE4UCXGy0iRGMhPwdfae1+f0OXlLqCk+By+aMzgJBzR9AzS4CDizioG6Ss1gvAFJw==", + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", "dev": true, - "dependencies": { - "cross-spawn": "^7.0.3", - "is-glob": "^4.0.3", - "open": "^8.4.0", - "picocolors": "^1.0.0", - "tiny-glob": "^0.2.9", - "tslib": "^2.4.0" - }, + "optional": true, "engines": { - "node": "^12.20.0 || ^14.18.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/unts" + "node": ">=14" } }, - "node_modules/@pkgr/utils/node_modules/tslib": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==", - "dev": true - }, "node_modules/@popperjs/core": { - "version": "2.11.6", - "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.6.tgz", - "integrity": "sha512-50/17A98tWUfQ176raKiOGXuYpLyyVMkxxG6oylzL3BPOlA6ADGdK7EYunSa4I064xerltq9TGXs8HmOk5E+vw==", + "version": "2.11.8", + "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz", + "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==", "funding": { "type": "opencollective", "url": "https://opencollective.com/popperjs" } }, "node_modules/@rushstack/eslint-patch": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.2.0.tgz", - "integrity": 
"sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg==", + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.10.3.tgz", + "integrity": "sha512-qC/xYId4NMebE6w/V33Fh9gWxLgURiNYgVNObbJl2LZv0GUUItCcCqC5axQSwRaAgaxl2mELq1rMzlswaQ0Zxg==", "dev": true }, "node_modules/@socket.io/component-emitter": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz", - "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==" + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", + "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==" + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" }, "node_modules/@swc/helpers": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", - "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", "dependencies": { + "@swc/counter": "^0.1.3", "tslib": "^2.4.0" } }, "node_modules/@swc/helpers/node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + 
"integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" }, "node_modules/@types/json5": { "version": "0.0.29", @@ -3004,6 +1251,19 @@ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", "dev": true }, + "node_modules/@types/lodash": { + "version": "4.17.5", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.5.tgz", + "integrity": "sha512-MBIOHVZqVqgfro1euRDWX7OO0fBVUUMrN6Pwm8LQsz8cWhEpihlvR70ENj3f40j58TNxZaWv2ndSkInykNBBJw==" + }, + "node_modules/@types/lodash-es": { + "version": "4.17.12", + "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.12.tgz", + "integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==", + "dependencies": { + "@types/lodash": "*" + } + }, "node_modules/@types/node": { "version": "18.13.0", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", @@ -3023,14 +1283,14 @@ "dev": true }, "node_modules/@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==" + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" }, "node_modules/@types/prop-types": { - "version": "15.7.5", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": 
"sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" }, "node_modules/@types/react": { "version": "18.0.28", @@ -3051,36 +1311,33 @@ "@types/react": "*" } }, - "node_modules/@types/react-is": { - "version": "17.0.3", - "resolved": "https://registry.npmjs.org/@types/react-is/-/react-is-17.0.3.tgz", - "integrity": "sha512-aBTIWg1emtu95bLTLx0cpkxwGW3ueZv71nE2YFBpL8k/z5czEW8yYpOo8Dp+UUAFAtKwNaOsh/ioSeQnWlZcfw==", - "dependencies": { - "@types/react": "*" - } - }, "node_modules/@types/react-transition-group": { - "version": "4.4.5", - "resolved": "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.5.tgz", - "integrity": "sha512-juKD/eiSM3/xZYzjuzH6ZwpP+/lejltmiS3QEzV/vmb/Q8+HfDmxu+Baga8UEMGBqV88Nbg4l2hY/K2DkyaLLA==", + "version": "4.4.10", + "resolved": "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.10.tgz", + "integrity": "sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==", "dependencies": { "@types/react": "*" } }, "node_modules/@types/scheduler": { - "version": "0.16.2", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz", - "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==" + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-YIoDCTH3Af6XM5VuwGG/QL/CJqga1Zm3NkU3HZ4ZHK2fRMPYP1VczsTUqtsf43PH/iJNVlPHAo2oWX7BSdB2Hw==" + }, + "node_modules/@types/stylis": { + "version": "4.2.5", + "resolved": "https://registry.npmjs.org/@types/stylis/-/stylis-4.2.5.tgz", + "integrity": "sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw==" }, "node_modules/@typescript-eslint/parser": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.53.0.tgz", - "integrity": 
"sha512-MKBw9i0DLYlmdOb3Oq/526+al20AJZpANdT6Ct9ffxcV8nKCHz63t/S0IhlTFNsBIHJv+GY5SFJ0XfqVeydQrQ==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz", + "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "5.53.0", - "@typescript-eslint/types": "5.53.0", - "@typescript-eslint/typescript-estree": "5.53.0", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", "debug": "^4.3.4" }, "engines": { @@ -3100,13 +1357,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.53.0.tgz", - "integrity": "sha512-Opy3dqNsp/9kBBeCPhkCNR7fmdSQqA+47r21hr9a14Bx0xnkElEQmhoHga+VoaoQ6uDHjDKmQPIYcUcKJifS7w==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", + "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.53.0", - "@typescript-eslint/visitor-keys": "5.53.0" + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -3117,9 +1374,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.53.0.tgz", - "integrity": "sha512-5kcDL9ZUIP756K6+QOAfPkigJmCPHcLN7Zjdz76lQWWDdzfOhZDTj1irs6gPBKiXx5/6O3L0+AvupAut3z7D2A==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz", + "integrity": 
"sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -3130,13 +1387,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.53.0.tgz", - "integrity": "sha512-eKmipH7QyScpHSkhbptBBYh9v8FxtngLquq292YTEQ1pxVs39yFBlLC1xeIZcPPz1RWGqb7YgERJRGkjw8ZV7w==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", + "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.53.0", - "@typescript-eslint/visitor-keys": "5.53.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -3157,12 +1414,12 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.53.0.tgz", - "integrity": "sha512-JqNLnX3leaHFZEN0gCh81sIvgrp/2GOACZNgO4+Tkf64u51kTpAyWFOY8XHx8XuXr3N2C9zgPPHtcpMg6z1g0w==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", + "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.53.0", + "@typescript-eslint/types": "5.62.0", "eslint-visitor-keys": "^3.3.0" }, "engines": { @@ -3174,9 +1431,9 @@ } }, "node_modules/acorn": { - "version": "8.8.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", - "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", + "version": 
"8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -3220,31 +1477,14 @@ } }, "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "dependencies": { - "color-convert": "^2.0.1" + "color-convert": "^1.9.0" }, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "optional": true, - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" + "node": ">=4" } }, "node_modules/apexcharts": { @@ -3267,24 +1507,41 @@ "dev": true }, "node_modules/aria-query": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.1.3.tgz", - "integrity": "sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", "dev": true, "dependencies": { - "deep-equal": "^2.0.5" + "dequal": "^2.0.3" + } + }, + "node_modules/array-buffer-byte-length": { + "version": 
"1.0.1", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz", + "integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.5", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/array-includes": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", - "integrity": "sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz", + "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", "is-string": "^1.0.7" }, "engines": { @@ -3303,15 +1560,55 @@ "node": ">=8" } }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.5", + "resolved": 
"https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz", + "integrity": "sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/array.prototype.flat": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz", + "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" }, "engines": { @@ -3322,14 +1619,14 @@ } }, "node_modules/array.prototype.flatmap": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz", - "integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz", + "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", 
"es-shim-unscopables": "^1.0.0" }, "engines": { @@ -3339,30 +1636,70 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/array.prototype.tosorted": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.1.tgz", - "integrity": "sha512-pZYPXPRl2PqWcsUs6LOMn+1f1532nEoPTYowBtqLwAW+W8vSVhkIGnmOX1t/UQjD6YGI0vcD2B1U7ZFGQH9jnQ==", + "node_modules/array.prototype.toreversed": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/array.prototype.toreversed/-/array.prototype.toreversed-1.1.2.tgz", + "integrity": "sha512-wwDCoT4Ck4Cz7sLtgUmzR5UV3YF5mFHUlbChCzZBQZ+0m2cl/DH3tKgvphv1nKgFsJ48oCSg6p91q2Vm0I/ZMA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0", - "get-intrinsic": "^1.1.3" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz", + "integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==", + "dev": true, + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.2.1", + "get-intrinsic": 
"^1.2.3", + "is-array-buffer": "^3.0.4", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/ast-types-flow": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz", - "integrity": "sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==", + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", + "integrity": "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", "dev": true }, "node_modules/available-typed-arrays": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", - "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", "dev": true, + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, "engines": { "node": ">= 0.4" }, @@ -3371,21 +1708,21 @@ } }, "node_modules/axe-core": { - "version": "4.6.3", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.6.3.tgz", - "integrity": "sha512-/BQzOX780JhsxDnPpH4ZiyrJAzcd8AfzFPkv+89veFSr1rcMjuq2JDCwypKaPeB6ljHp9KjXhPpjgCvQlWYuqg==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.7.0.tgz", + "integrity": "sha512-M0JtH+hlOL5pLQwHOLNYZaXuhqmvS8oExsqB1SBYgA4Dk7u/xx+YdGHXaK5pyUfed5mYXdlYiphWq3G8cRi5JQ==", "dev": true, "engines": { "node": ">=4" } }, "node_modules/axobject-query": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.1.1.tgz", - "integrity": 
"sha512-goKlv8DZrK9hUh975fnHzhNIO4jUnFCfv/dszV5VwUGDFjI6vQ2VwoyjYjYNEbBE8AH87TduWP5uyDR1D+Iteg==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.2.1.tgz", + "integrity": "sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==", "dev": true, "dependencies": { - "deep-equal": "^2.0.5" + "dequal": "^2.0.3" } }, "node_modules/babel-plugin-macros": { @@ -3402,54 +1739,11 @@ "npm": ">=6" } }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.3.tgz", - "integrity": "sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw==", - "dependencies": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-define-polyfill-provider": "^0.4.0", - "semver": "^6.1.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.1.tgz", - "integrity": "sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.4.0", - "core-js-compat": "^3.30.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.0.tgz", - "integrity": 
"sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.4.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true }, "node_modules/base64-js": { "version": "1.5.1", @@ -3470,86 +1764,28 @@ } ] }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "optional": true, - "engines": { - "node": ">=8" - } - }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "devOptional": true, + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, - "node_modules/browserslist": { - "version": "4.21.9", 
- "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.9.tgz", - "integrity": "sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "dependencies": { - "caniuse-lite": "^1.0.30001503", - "electron-to-chromium": "^1.4.431", - "node-releases": "^2.0.12", - "update-browserslist-db": "^1.0.11" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/browserslist/node_modules/caniuse-lite": { - "version": "1.0.30001504", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001504.tgz", - "integrity": "sha512-5uo7eoOp2mKbWyfMXnGO9rJWOGU8duvzEiYITW+wivukL7yHH4gX9yuRaobu6El4jPxo6jKZfG+N6fB621GD/Q==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ] - }, "node_modules/buffer": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", @@ -3590,13 +1826,19 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dev": true, "dependencies": { - "function-bind": "^1.1.1", - 
"get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -3624,9 +1866,9 @@ "integrity": "sha512-ceOhN1DL7Y4O6M0j9ICgmTYziV89WMd96SvSl0REd8PMgrY0B/WBOPoed5S1KUmJqXgUXh8gzSe6E3ae27upsQ==" }, "node_modules/caniuse-lite": { - "version": "1.0.30001457", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001457.tgz", - "integrity": "sha512-SDIV6bgE1aVbK6XyxdURbUE89zY7+k1BBBaOwYwkNCglXlel/E7mELiHC64HQ+W0xSKlqWhV9Wh7iHxUjMs4fA==", + "version": "1.0.30001633", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001633.tgz", + "integrity": "sha512-6sT0yf/z5jqf8tISAgpJDrmwOpLsrpnyCdD/lOZKvKkkJK4Dn0X5i7KF7THEZhOq+30bmhwBlNEaqPUiHiKtZg==", "funding": [ { "type": "opencollective", @@ -3635,62 +1877,32 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ] }, "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" + "node": ">=4" } }, - "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - 
"integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], - "optional": true, - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, + "node_modules/chalk/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/chokidar/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "optional": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" + "node": ">=0.8.0" } }, "node_modules/client-only": { @@ -3707,53 +1919,29 @@ } }, "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" + "color-name": "1.1.3" } }, "node_modules/color-name": { - "version": "1.1.4", - "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", - "engines": { - "node": ">= 6" - } + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true }, "node_modules/convert-source-map": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==" }, - "node_modules/core-js-compat": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.31.0.tgz", - "integrity": "sha512-hM7YCu1cU6Opx7MXNu0NuumM0ezNeAeRKadixyiQELWY3vT3De9S4J5ZBMraWV2vZnrE1Cirl0GtFtDtMUXzPw==", - "dependencies": { - "browserslist": "^4.21.5" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, "node_modules/core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", @@ -3807,9 +1995,9 @@ } }, "node_modules/csstype": { - "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/csstype/-/csstype-3.1.1.tgz", - "integrity": "sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, "node_modules/damerau-levenshtein": { "version": "1.0.8", @@ -3817,6 +2005,57 @@ "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", "dev": true }, + "node_modules/data-view-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.1.tgz", + "integrity": "sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz", + "integrity": "sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz", + "integrity": "sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + 
"node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/date-fns": { "version": "2.29.3", "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.29.3.tgz", @@ -3830,9 +2069,9 @@ } }, "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", + "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", "dependencies": { "ms": "2.1.2" }, @@ -3845,34 +2084,6 @@ } } }, - "node_modules/deep-equal": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-2.2.0.tgz", - "integrity": "sha512-RdpzE0Hv4lhowpIUKKMJfeH6C1pXdtT1/it80ubgWqwI3qpuxUBpC1S4hnHg+zjnuOoDkzUtUCEEkG+XG5l3Mw==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "es-get-iterator": "^1.1.2", - "get-intrinsic": "^1.1.3", - "is-arguments": "^1.1.1", - "is-array-buffer": "^3.0.1", - "is-date-object": "^1.0.5", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "isarray": "^2.0.5", - "object-is": "^1.1.5", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4", - "which-boxed-primitive": "^1.0.2", - "which-collection": "^1.0.1", - "which-typed-array": "^1.1.9" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -3887,21 +2098,30 @@ "node": ">=0.10.0" } }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": 
"sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", "dev": true, + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, "engines": { - "node": ">=8" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/define-properties": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz", - "integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", "dev": true, "dependencies": { + "define-data-property": "^1.0.1", "has-property-descriptors": "^1.0.0", "object-keys": "^1.1.1" }, @@ -3912,6 +2132,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ -3959,9 +2188,9 @@ "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" }, "node_modules/duplexer2/node_modules/readable-stream": { - "version": "2.3.7", - "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -3972,6 +2201,11 @@ "util-deprecate": "~1.0.1" } }, + "node_modules/duplexer2/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, "node_modules/duplexer2/node_modules/string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", @@ -3980,10 +2214,11 @@ "safe-buffer": "~5.1.0" } }, - "node_modules/electron-to-chromium": { - "version": "1.4.433", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.433.tgz", - "integrity": "sha512-MGO1k0w1RgrfdbLVwmXcDhHHuxCn2qRgR7dYsJvWFKDttvYPx6FNzCGG0c/fBBvzK2LDh3UV7Tt9awnHnvAAUQ==" + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true }, "node_modules/emoji-regex": { "version": "9.2.2", @@ -3992,29 +2227,29 @@ "dev": true }, "node_modules/engine.io-client": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.4.0.tgz", - "integrity": "sha512-GyKPDyoEha+XZ7iEqam49vz6auPnNJ9ZBfy89f+rMMas8AuiMWOZ9PVzu8xb9ZC6rafUqiGHSCfu22ih66E+1g==", + "version": "6.5.3", + "resolved": 
"https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.5.3.tgz", + "integrity": "sha512-9Z0qLB0NIisTRt1DZ/8U2k12RJn8yls/nXMZLn+/N8hANT3TcYjKFKcwbw5zFQiN4NTde3TSY9zb79e1ij6j9Q==", "dependencies": { "@socket.io/component-emitter": "~3.1.0", "debug": "~4.3.1", - "engine.io-parser": "~5.0.3", + "engine.io-parser": "~5.2.1", "ws": "~8.11.0", "xmlhttprequest-ssl": "~2.0.0" } }, "node_modules/engine.io-parser": { - "version": "5.0.7", - "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.7.tgz", - "integrity": "sha512-P+jDFbvK6lE3n1OL+q9KuzdOFWkkZ/cMV9gol/SbVfpyqfvrfrFTOFJ6fQm2VC3PZHlU3QPhVwmbsCnauHF2MQ==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.2.2.tgz", + "integrity": "sha512-RcyUFKA93/CXH20l4SoVvzZfrSDMOTUS3bWVpTt2FuFP+XYrL8i8oonHP7WInRyVHXh0n/ORtoeiE1os+8qkSw==", "engines": { "node": ">=10.0.0" } }, "node_modules/enhanced-resolve": { - "version": "5.12.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.12.0.tgz", - "integrity": "sha512-QHTXI/sZQmko1cbDoNAa3mJ5qhWUUNAq3vR0/YiD379fWQrcfuoX1+HW2S0MTt7XmoPLapdaDKUtelUSPic7hQ==", + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", + "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", "dev": true, "dependencies": { "graceful-fs": "^4.2.4", @@ -4038,44 +2273,57 @@ } }, "node_modules/es-abstract": { - "version": "1.21.1", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.1.tgz", - "integrity": "sha512-QudMsPOz86xYz/1dG1OuGBKOELjCh99IIWHLzy5znUB6j8xG2yMA7bfTV86VSqKF+Y/H08vQPR+9jyXpuC6hfg==", + "version": "1.23.3", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz", + "integrity": "sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==", "dev": true, "dependencies": { - 
"available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "es-set-tostringtag": "^2.0.1", + "array-buffer-byte-length": "^1.0.1", + "arraybuffer.prototype.slice": "^1.0.3", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "data-view-buffer": "^1.0.1", + "data-view-byte-length": "^1.0.1", + "data-view-byte-offset": "^1.0.0", + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-set-tostringtag": "^2.0.3", "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.1.3", - "get-symbol-description": "^1.0.0", + "function.prototype.name": "^1.1.6", + "get-intrinsic": "^1.2.4", + "get-symbol-description": "^1.0.2", "globalthis": "^1.0.3", "gopd": "^1.0.1", - "has": "^1.0.3", - "has-property-descriptors": "^1.0.0", - "has-proto": "^1.0.1", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", "has-symbols": "^1.0.3", - "internal-slot": "^1.0.4", - "is-array-buffer": "^3.0.1", + "hasown": "^2.0.2", + "internal-slot": "^1.0.7", + "is-array-buffer": "^3.0.4", "is-callable": "^1.2.7", - "is-negative-zero": "^2.0.2", + "is-data-view": "^1.0.1", + "is-negative-zero": "^2.0.3", "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", + "is-shared-array-buffer": "^1.0.3", "is-string": "^1.0.7", - "is-typed-array": "^1.1.10", + "is-typed-array": "^1.1.13", "is-weakref": "^1.0.2", - "object-inspect": "^1.12.2", + "object-inspect": "^1.13.1", "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "safe-regex-test": "^1.0.0", - "string.prototype.trimend": "^1.0.6", - "string.prototype.trimstart": "^1.0.6", - "typed-array-length": "^1.0.4", + "object.assign": "^4.1.5", + "regexp.prototype.flags": "^1.5.2", + "safe-array-concat": "^1.1.2", + "safe-regex-test": "^1.0.3", + "string.prototype.trim": "^1.2.9", + "string.prototype.trimend": "^1.0.8", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": 
"^1.0.2", + "typed-array-byte-length": "^1.0.1", + "typed-array-byte-offset": "^1.0.2", + "typed-array-length": "^1.0.6", "unbox-primitive": "^1.0.2", - "which-typed-array": "^1.1.9" + "which-typed-array": "^1.1.15" }, "engines": { "node": ">= 0.4" @@ -4084,47 +2332,85 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/es-get-iterator": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", - "integrity": "sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==", + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "has-symbols": "^1.0.3", - "is-arguments": "^1.1.1", - "is-map": "^2.0.2", - "is-set": "^2.0.2", - "is-string": "^1.0.7", - "isarray": "^2.0.5", - "stop-iteration-iterator": "^1.0.0" + "get-intrinsic": "^1.2.4" }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.19.tgz", + "integrity": "sha512-zoMwbCcH5hwUkKJkT8kDIBZSz9I6mVG//+lDCinLCGov4+r7NIy0ld8o03M0cJxl2spVf6ESYVS6/gpIfq1FFw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-set-tostringtag": 
"^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "iterator.prototype": "^1.1.2", + "safe-array-concat": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz", + "integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==", + "dev": true, + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" } }, "node_modules/es-set-tostringtag": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", - "integrity": "sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz", + "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==", "dev": true, "dependencies": { - "get-intrinsic": "^1.1.3", - "has": "^1.0.3", - "has-tostringtag": "^1.0.0" + "get-intrinsic": "^1.2.4", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.1" }, "engines": { "node": ">= 0.4" } }, "node_modules/es-shim-unscopables": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", - "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz", + "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==", "dev": true, "dependencies": { - "has": "^1.0.3" + "hasown": "^2.0.0" } }, 
"node_modules/es-to-primitive": { @@ -4144,14 +2430,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "engines": { - "node": ">=6" - } - }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -4220,20 +2498,20 @@ } }, "node_modules/eslint-config-next": { - "version": "13.1.6", - "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-13.1.6.tgz", - "integrity": "sha512-0cg7h5wztg/SoLAlxljZ0ZPUQ7i6QKqRiP4M2+MgTZtxWwNKb2JSwNc18nJ6/kXBI6xYvPraTbQSIhAuVw6czw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.4.tgz", + "integrity": "sha512-Qr0wMgG9m6m4uYy2jrYJmyuNlYZzPRQq5Kvb9IDlYwn+7yq6W6sfMNFgb+9guM1KYwuIo6TIaiFhZJ6SnQ/Efw==", "dev": true, "dependencies": { - "@next/eslint-plugin-next": "13.1.6", - "@rushstack/eslint-patch": "^1.1.3", - "@typescript-eslint/parser": "^5.42.0", + "@next/eslint-plugin-next": "14.2.4", + "@rushstack/eslint-patch": "^1.3.3", + "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || 7.0.0 - 7.2.0", "eslint-import-resolver-node": "^0.3.6", "eslint-import-resolver-typescript": "^3.5.2", - "eslint-plugin-import": "^2.26.0", - "eslint-plugin-jsx-a11y": "^6.5.1", - "eslint-plugin-react": "^7.31.7", - "eslint-plugin-react-hooks": "^4.5.0" + "eslint-plugin-import": "^2.28.1", + "eslint-plugin-jsx-a11y": "^6.7.1", + "eslint-plugin-react": "^7.33.2", + "eslint-plugin-react-hooks": "^4.5.0 || 5.0.0-canary-7118f5dd7-20230705" }, "peerDependencies": { "eslint": "^7.23.0 || ^8.0.0", @@ -4246,14 +2524,14 @@ } }, "node_modules/eslint-import-resolver-node": { - "version": "0.3.7", - "resolved": 
"https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz", - "integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==", + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", "dev": true, "dependencies": { "debug": "^3.2.7", - "is-core-module": "^2.11.0", - "resolve": "^1.22.1" + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" } }, "node_modules/eslint-import-resolver-node/node_modules/debug": { @@ -4266,18 +2544,18 @@ } }, "node_modules/eslint-import-resolver-typescript": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.5.3.tgz", - "integrity": "sha512-njRcKYBc3isE42LaTcJNVANR3R99H9bAxBDMNDr2W7yq5gYPxbU3MkdhsQukxZ/Xg9C2vcyLlDsbKfRDg0QvCQ==", + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.6.1.tgz", + "integrity": "sha512-xgdptdoi5W3niYeuQxKmzVDTATvLYqhpwmykwsh7f6HIOStGWEIL9iqZgQDF9u9OEzrRwR8no5q2VT+bjAujTg==", "dev": true, "dependencies": { "debug": "^4.3.4", - "enhanced-resolve": "^5.10.0", - "get-tsconfig": "^4.2.0", - "globby": "^13.1.2", - "is-core-module": "^2.10.0", - "is-glob": "^4.0.3", - "synckit": "^0.8.4" + "enhanced-resolve": "^5.12.0", + "eslint-module-utils": "^2.7.4", + "fast-glob": "^3.3.1", + "get-tsconfig": "^4.5.0", + "is-core-module": "^2.11.0", + "is-glob": "^4.0.3" }, "engines": { "node": "^14.18.0 || >=16.0.0" @@ -4290,41 +2568,10 @@ "eslint-plugin-import": "*" } }, - "node_modules/eslint-import-resolver-typescript/node_modules/globby": { - "version": "13.1.3", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.3.tgz", - "integrity": 
"sha512-8krCNHXvlCgHDpegPzleMq07yMYTO2sXKASmZmquEYWEmCx6J5UTRbp5RwMJkTJGtcQ44YpiUYUiN0b9mzy8Bw==", - "dev": true, - "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint-import-resolver-typescript/node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/eslint-module-utils": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz", - "integrity": "sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.1.tgz", + "integrity": "sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q==", "dev": true, "dependencies": { "debug": "^3.2.7" @@ -4348,26 +2595,28 @@ } }, "node_modules/eslint-plugin-import": { - "version": "2.27.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz", - "integrity": "sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==", + "version": "2.29.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.29.1.tgz", + "integrity": "sha512-BbPC0cuExzhiMo4Ff1BTVwHpjjv28C5R+btTOGaCRC7UEz801up0JadwkeSk5Ued6TG34uaczuVuH6qyy5YUxw==", "dev": true, "dependencies": { - "array-includes": "^3.1.6", - "array.prototype.flat": "^1.3.1", - 
"array.prototype.flatmap": "^1.3.1", + "array-includes": "^3.1.7", + "array.prototype.findlastindex": "^1.2.3", + "array.prototype.flat": "^1.3.2", + "array.prototype.flatmap": "^1.3.2", "debug": "^3.2.7", "doctrine": "^2.1.0", - "eslint-import-resolver-node": "^0.3.7", - "eslint-module-utils": "^2.7.4", - "has": "^1.0.3", - "is-core-module": "^2.11.0", + "eslint-import-resolver-node": "^0.3.9", + "eslint-module-utils": "^2.8.0", + "hasown": "^2.0.0", + "is-core-module": "^2.13.1", "is-glob": "^4.0.3", "minimatch": "^3.1.2", - "object.values": "^1.1.6", - "resolve": "^1.22.1", - "semver": "^6.3.0", - "tsconfig-paths": "^3.14.1" + "object.fromentries": "^2.0.7", + "object.groupby": "^1.0.1", + "object.values": "^1.1.7", + "semver": "^6.3.1", + "tsconfig-paths": "^3.15.0" }, "engines": { "node": ">=4" @@ -4407,27 +2656,27 @@ } }, "node_modules/eslint-plugin-jsx-a11y": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.7.1.tgz", - "integrity": "sha512-63Bog4iIethyo8smBklORknVjB0T2dwB8Mr/hIC+fBS0uyHdYYpzM/Ed+YC8VxTjlXHEWFOdmgwcDn1U2L9VCA==", + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.8.0.tgz", + "integrity": "sha512-Hdh937BS3KdwwbBaKd5+PLCOmYY6U4f2h9Z2ktwtNKvIdIEu137rjYbcb9ApSbVJfWxANNuiKTD/9tOKjK9qOA==", "dev": true, "dependencies": { - "@babel/runtime": "^7.20.7", - "aria-query": "^5.1.3", - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "ast-types-flow": "^0.0.7", - "axe-core": "^4.6.2", - "axobject-query": "^3.1.1", + "@babel/runtime": "^7.23.2", + "aria-query": "^5.3.0", + "array-includes": "^3.1.7", + "array.prototype.flatmap": "^1.3.2", + "ast-types-flow": "^0.0.8", + "axe-core": "=4.7.0", + "axobject-query": "^3.2.1", "damerau-levenshtein": "^1.0.8", "emoji-regex": "^9.2.2", - "has": "^1.0.3", - "jsx-ast-utils": "^3.3.3", - "language-tags": "=1.0.5", + "es-iterator-helpers": "^1.0.15", + "hasown": 
"^2.0.0", + "jsx-ast-utils": "^3.3.5", + "language-tags": "^1.0.9", "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "semver": "^6.3.0" + "object.entries": "^1.1.7", + "object.fromentries": "^2.0.7" }, "engines": { "node": ">=4.0" @@ -4436,36 +2685,30 @@ "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" } }, - "node_modules/eslint-plugin-jsx-a11y/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/eslint-plugin-react": { - "version": "7.32.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.32.2.tgz", - "integrity": "sha512-t2fBMa+XzonrrNkyVirzKlvn5RXzzPwRHtMvLAtVZrt8oxgnTQaYbU6SXTOO1mwQgp1y5+toMSKInnzGr0Knqg==", + "version": "7.34.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.34.2.tgz", + "integrity": "sha512-2HCmrU+/JNigDN6tg55cRDKCQWicYAPB38JGSFDQt95jDm8rrvSUo7YPkOIm5l6ts1j1zCvysNcasvfTMQzUOw==", "dev": true, "dependencies": { - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "array.prototype.tosorted": "^1.1.1", + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.2", + "array.prototype.toreversed": "^1.1.2", + "array.prototype.tosorted": "^1.1.3", "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.19", "estraverse": "^5.3.0", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "object.hasown": "^1.1.2", - "object.values": "^1.1.6", + "object.entries": "^1.1.8", + "object.fromentries": "^2.0.8", + "object.hasown": "^1.1.4", + "object.values": "^1.2.0", "prop-types": "^15.8.1", - "resolve": "^2.0.0-next.4", - "semver": "^6.3.0", - 
"string.prototype.matchall": "^4.0.8" + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.11" }, "engines": { "node": ">=4" @@ -4475,9 +2718,9 @@ } }, "node_modules/eslint-plugin-react-hooks": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", - "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", + "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", "dev": true, "engines": { "node": ">=10" @@ -4499,12 +2742,12 @@ } }, "node_modules/eslint-plugin-react/node_modules/resolve": { - "version": "2.0.0-next.4", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.4.tgz", - "integrity": "sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==", + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", "dev": true, "dependencies": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -4525,9 +2768,9 @@ } }, "node_modules/eslint-scope": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", - "integrity": "sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, "dependencies": { "esrecurse": 
"^4.3.0", @@ -4535,6 +2778,9 @@ }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/eslint-utils": { @@ -4565,23 +2811,123 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz", - "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/eslint/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/eslint/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/eslint/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" } }, "node_modules/espree": { - "version": "9.4.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.4.1.tgz", - "integrity": "sha512-XwctdmTO6SIvCzd9810yyNzIrOrqNYV9Koizx4C/mRhf9uq0o4yHoCEU/670pOxOL/MSraektvSAji79kX90Vg==", + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, "dependencies": { - "acorn": "^8.8.0", + "acorn": "^8.9.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.3.0" + "eslint-visitor-keys": "^3.4.1" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4591,9 +2937,9 @@ } }, "node_modules/esquery": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.2.tgz", - "integrity": "sha512-JVSoLdTlTDkmjFmab7H/9SL9qGSyjElT3myyKp7krqjVFQCDLmj1QFaCLRFBszBKI0XVZaiiXvuPIX3ZwHe1Ng==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", "dev": true, "dependencies": { "estraverse": "^5.1.0" @@ -4627,6 +2973,7 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, "engines": { "node": ">=0.10.0" } @@ -4638,9 +2985,9 @@ "dev": true }, "node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", 
"dev": true, "dependencies": { "@nodelib/fs.stat": "^2.0.2", @@ -4678,9 +3025,9 @@ "dev": true }, "node_modules/fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", "dev": true, "dependencies": { "reusify": "^1.0.4" @@ -4699,10 +3046,10 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "devOptional": true, + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -4732,12 +3079,13 @@ } }, "node_modules/flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, "dependencies": { - "flatted": "^3.1.0", + "flatted": "^3.2.9", + "keyv": "^4.5.3", "rimraf": "^3.0.2" }, "engines": { @@ -4745,9 +3093,9 @@ } }, "node_modules/flatted": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", - "integrity": 
"sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", "dev": true }, "node_modules/for-each": { @@ -4759,6 +3107,22 @@ "is-callable": "^1.1.3" } }, + "node_modules/foreground-child": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.2.0.tgz", + "integrity": "sha512-CrWQNaEl1/6WeZoarcM9LHupTo3RpZO2Pdk1vktwzPiQTsJnAKJmm3TACKeG5UZbWDfaH2AbvYxzP96y0MT7fA==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/formik": { "version": "2.2.9", "resolved": "https://registry.npmjs.org/formik/-/formik-2.2.9.tgz", @@ -4782,44 +3146,30 @@ "react": ">=16.8.0" } }, - "node_modules/fs-readdir-recursive": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz", - "integrity": "sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==" - }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" - }, - "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } + "integrity": 
"sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/function.prototype.name": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", + "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" }, "engines": { "node": ">= 0.4" @@ -4837,41 +3187,39 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/get-browser-rtc": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/get-browser-rtc/-/get-browser-rtc-1.1.0.tgz", "integrity": 
"sha512-MghbMJ61EJrRsDe7w1Bvqt3ZsBuqhce5nrn/XAwgwOXhcsz53/ltdxOse1h/8eKXj5slzxdsz56g5rzOFSGwfQ==" }, "node_modules/get-intrinsic": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dev": true, "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/get-symbol-description": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", - "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz", + "integrity": "sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" + "call-bind": "^1.0.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4" }, "engines": { "node": ">= 0.4" @@ -4881,10 +3229,13 @@ } }, "node_modules/get-tsconfig": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.4.0.tgz", - "integrity": "sha512-0Gdjo/9+FzsYhXCEFueo2aY1z1tpXrxWZzP7k8ul9qt1U5o8rYJwTJYmaeHdrVosYIVYkOy2iwCJ9FdpocJhPQ==", + "version": "4.7.5", + "resolved": 
"https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.5.tgz", + "integrity": "sha512-ZCuZCnlqNzjb4QprAzXKdpp/gh6KTxSJuw3IBsPnV/7fV4NxC9ckB+vPTt8w7fJA0TaSD7c55BR47JD6MEDyDw==", "dev": true, + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, "funding": { "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" } @@ -4893,6 +3244,7 @@ "version": "7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "dependencies": { "fs.realpath": "^1.0.0", @@ -4921,33 +3273,22 @@ "node": ">=10.13.0" } }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" - }, "node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", - "dev": true, - "dependencies": { - "type-fest": "^0.20.2" - }, + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4" } }, "node_modules/globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": 
"sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", "dev": true, "dependencies": { - "define-properties": "^1.1.3" + "define-properties": "^1.2.1", + "gopd": "^1.0.1" }, "engines": { "node": ">= 0.4" @@ -4956,12 +3297,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/globalyzer": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/globalyzer/-/globalyzer-0.1.0.tgz", - "integrity": "sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q==", - "dev": true - }, "node_modules/globby": { "version": "11.1.0", "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", @@ -4982,12 +3317,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/globrex": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/globrex/-/globrex-0.1.2.tgz", - "integrity": "sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==", - "dev": true - }, "node_modules/gopd": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", @@ -5001,9 +3330,9 @@ } }, "node_modules/graceful-fs": { - "version": "4.2.10", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", - "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, "node_modules/grapheme-splitter": { "version": "1.0.4", @@ -5011,17 +3340,6 @@ "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", "dev": true }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": 
"sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, "node_modules/has-bigints": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", @@ -5032,30 +3350,29 @@ } }, "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", "engines": { - "node": ">=8" + "node": ">=4" } }, "node_modules/has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dev": true, "dependencies": { - "get-intrinsic": "^1.1.1" + "es-define-property": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": 
"sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", "dev": true, "engines": { "node": ">= 0.4" @@ -5077,12 +3394,12 @@ } }, "node_modules/has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "dev": true, "dependencies": { - "has-symbols": "^1.0.2" + "has-symbols": "^1.0.3" }, "engines": { "node": ">= 0.4" @@ -5091,6 +3408,17 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hoist-non-react-statics": { "version": "3.3.2", "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", @@ -5139,9 +3467,9 @@ ] }, "node_modules/ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "dev": true, "engines": { "node": ">= 4" @@ -5175,6 +3503,8 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": 
"sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -5186,27 +3516,27 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "node_modules/internal-slot": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz", - "integrity": "sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz", + "integrity": "sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==", "dev": true, "dependencies": { - "get-intrinsic": "^1.2.0", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "hasown": "^2.0.0", "side-channel": "^1.0.4" }, "engines": { "node": ">= 0.4" } }, - "node_modules/is-arguments": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "node_modules/is-array-buffer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz", + "integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" + "get-intrinsic": "^1.2.1" }, "engines": { "node": ">= 0.4" @@ -5215,25 +3545,26 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-array-buffer": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.1.tgz", - "integrity": "sha512-ASfLknmY8Xa2XtB4wmbz13Wu202baeA18cJBCeCy0wXUHZF0IPyVEXqKEcd+t2fNSLLL1vC6k7lxZEojNbISXQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "is-typed-array": "^1.1.10" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" }, + "node_modules/is-async-function": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", + "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-bigint": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", @@ -5246,18 +3577,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "optional": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/is-boolean-object": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", @@ -5287,11 +3606,26 @@ } }, "node_modules/is-core-module": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz", - "integrity": 
"sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", "dependencies": { - "has": "^1.0.3" + "hasown": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz", + "integrity": "sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==", + "dev": true, + "dependencies": { + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -5312,35 +3646,56 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "dev": true, - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "devOptional": true, + "dev": true, "engines": { "node": ">=0.10.0" } }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz", + "integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==", + "dev": true, + "dependencies": { + "call-bind": 
"^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "devOptional": true, + "dev": true, "dependencies": { "is-extglob": "^2.1.1" }, @@ -5349,18 +3704,21 @@ } }, "node_modules/is-map": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz", - "integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-negative-zero": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", - "integrity": 
"sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", "dev": true, "engines": { "node": ">= 0.4" @@ -5373,7 +3731,7 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "devOptional": true, + "dev": true, "engines": { "node": ">=0.12.0" } @@ -5419,21 +3777,27 @@ } }, "node_modules/is-set": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz", - "integrity": "sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-shared-array-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", - "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz", + "integrity": "sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==", "dev": true, "dependencies": { - "call-bind": "^1.0.2" + "call-bind": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -5470,16 +3834,12 @@ } }, 
"node_modules/is-typed-array": { - "version": "1.1.10", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz", + "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", "dev": true, "dependencies": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0" + "which-typed-array": "^1.1.14" }, "engines": { "node": ">= 0.4" @@ -5489,10 +3849,13 @@ } }, "node_modules/is-weakmap": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz", - "integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -5510,35 +3873,25 @@ } }, "node_modules/is-weakset": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz", - "integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.3.tgz", + "integrity": "sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": 
"https://github.com/sponsors/ljharb" } }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dev": true, - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", - "dev": true + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" }, "node_modules/isexe": { "version": "2.0.0", @@ -5546,10 +3899,41 @@ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, + "node_modules/iterator.prototype": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.2.tgz", + "integrity": "sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==", + "dev": true, + "dependencies": { + "define-properties": "^1.2.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.4", + "set-function-name": "^2.0.1" + } + }, + "node_modules/jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dev": true, + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, 
"node_modules/js-sdsl": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.3.0.tgz", - "integrity": "sha512-mifzlm2+5nZ+lEcLJMoBK0/IH/bDg8XnJfd/Wq6IP+xoCjLZsTOnV2QpxlVbX9bMnkl5PdEjNtBJ9Cj1NjifhQ==", + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.4.2.tgz", + "integrity": "sha512-dwXFwByc/ajSV6m5bcKAPwe4yDDF6D614pxmIi5odytzxRlwqF6nwoiCek80Ixc7Cvma5awClxrzFtxCQvcM8w==", "dev": true, "funding": { "type": "opencollective", @@ -5584,6 +3968,12 @@ "node": ">=4" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", @@ -5614,31 +4004,45 @@ } }, "node_modules/jsx-ast-utils": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz", - "integrity": "sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==", + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", "dev": true, "dependencies": { - "array-includes": "^3.1.5", - "object.assign": "^4.1.3" + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" }, "engines": { "node": ">=4.0" } }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + 
"json-buffer": "3.0.1" + } + }, "node_modules/language-subtag-registry": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz", - "integrity": "sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w==", + "version": "0.3.23", + "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", + "integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", "dev": true }, "node_modules/language-tags": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.5.tgz", - "integrity": "sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==", + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", + "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", "dev": true, "dependencies": { - "language-subtag-registry": "~0.3.2" + "language-subtag-registry": "^0.3.20" + }, + "engines": { + "node": ">=0.10" } }, "node_modules/levn": { @@ -5684,11 +4088,6 @@ "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" - }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -5707,35 +4106,12 @@ } }, "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - 
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz", + "integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==", "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, "engines": { - "node": ">=10" - } - }, - "node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/make-dir/node_modules/semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "bin": { - "semver": "bin/semver" + "node": "14 || >=16.14" } }, "node_modules/merge2": { @@ -5748,12 +4124,12 @@ } }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz", + "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==", "dev": true, "dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { @@ -5764,6 +4140,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, "dependencies": { "brace-expansion": 
"^1.1.7" }, @@ -5779,6 +4156,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -5817,37 +4203,38 @@ "dev": true }, "node_modules/next": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/next/-/next-13.5.6.tgz", - "integrity": "sha512-Y2wTcTbO4WwEsVb4A8VSnOsG1I9ok+h74q0ZdxkwM3EODqrs4pasq7O0iUxbcS9VtWMicG7f3+HAj0r1+NtKSw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.4.tgz", + "integrity": "sha512-R8/V7vugY+822rsQGQCjoLhMuC9oFj9SOi4Cl4b2wjDrseD0LRZ10W7R6Czo4w9ZznVSshKjuIomsRjvm9EKJQ==", "dependencies": { - "@next/env": "13.5.6", - "@swc/helpers": "0.5.2", + "@next/env": "14.2.4", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", - "caniuse-lite": "^1.0.30001406", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", "postcss": "8.4.31", - "styled-jsx": "5.1.1", - "watchpack": "2.4.0" + "styled-jsx": "5.1.1" }, "bin": { "next": "dist/bin/next" }, "engines": { - "node": ">=16.14.0" + "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "13.5.6", - "@next/swc-darwin-x64": "13.5.6", - "@next/swc-linux-arm64-gnu": "13.5.6", - "@next/swc-linux-arm64-musl": "13.5.6", - "@next/swc-linux-x64-gnu": "13.5.6", - "@next/swc-linux-x64-musl": "13.5.6", - "@next/swc-win32-arm64-msvc": "13.5.6", - "@next/swc-win32-ia32-msvc": "13.5.6", - "@next/swc-win32-x64-msvc": "13.5.6" + "@next/swc-darwin-arm64": "14.2.4", + "@next/swc-darwin-x64": "14.2.4", + "@next/swc-linux-arm64-gnu": "14.2.4", + "@next/swc-linux-arm64-musl": "14.2.4", + "@next/swc-linux-x64-gnu": "14.2.4", + 
"@next/swc-linux-x64-musl": "14.2.4", + "@next/swc-win32-arm64-msvc": "14.2.4", + "@next/swc-win32-ia32-msvc": "14.2.4", + "@next/swc-win32-x64-msvc": "14.2.4" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -5856,25 +4243,14 @@ "@opentelemetry/api": { "optional": true }, + "@playwright/test": { + "optional": true + }, "sass": { "optional": true } } }, - "node_modules/node-releases": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", - "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==" - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "optional": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/nprogress": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", @@ -5889,30 +4265,14 @@ } }, "node_modules/object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", "dev": true, "funding": { "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - 
"dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/object-keys": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", @@ -5923,13 +4283,13 @@ } }, "node_modules/object.assign": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", - "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", "has-symbols": "^1.0.3", "object-keys": "^1.1.1" }, @@ -5941,28 +4301,29 @@ } }, "node_modules/object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz", + "integrity": "sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" } }, "node_modules/object.fromentries": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": 
"sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -5971,28 +4332,46 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/object.hasown": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.2.tgz", - "integrity": "sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==", + "node_modules/object.groupby": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz", + "integrity": "sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==", "dev": true, "dependencies": { - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.hasown": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.4.tgz", + "integrity": "sha512-FZ9LZt9/RHzGySlBARE3VF+gE26TxR38SdmqOqliuTnl9wrKulaQs+4dee1V+Io8VfxqzAfHu6YuRgUy8OHoTg==", + "dev": true, + "dependencies": { + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/object.values": { - "version": "1.1.6", - "resolved": 
"https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.0.tgz", + "integrity": "sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -6005,31 +4384,15 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, "dependencies": { "wrappy": "1" } }, - "node_modules/open": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", - "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", - "dev": true, - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, "dependencies": { "deep-is": "^0.1.3", @@ -6037,7 +4400,7 @@ "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", - "word-wrap": "^1.2.3" + 
"word-wrap": "^1.2.5" }, "engines": { "node": ">= 0.8.0" @@ -6114,6 +4477,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, "engines": { "node": ">=0.10.0" } @@ -6132,6 +4496,22 @@ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", @@ -6141,15 +4521,15 @@ } }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "devOptional": true, + "dev": true, "engines": { "node": ">=8.6" }, @@ -6157,12 +4537,13 @@ "url": 
"https://github.com/sponsors/jonschlinkert" } }, - "node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "node_modules/possible-typed-array-names": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", + "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "dev": true, "engines": { - "node": ">=6" + "node": ">= 0.4" } }, "node_modules/postcss": { @@ -6227,14 +4608,14 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, "node_modules/property-expr": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.5.tgz", - "integrity": "sha512-IJUkICM5dP5znhCckHSv30Q4b5/JA5enCtkRHYaOVOAocnH/1BQEYTC5NMfT3AVl/iXKdr3aqQbQn9DxyWknwA==" + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.6.tgz", + "integrity": "sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA==" }, "node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, "engines": { "node": ">=6" @@ -6268,9 +4649,9 @@ } }, "node_modules/react": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", - "integrity": 
"sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", "dependencies": { "loose-envify": "^1.1.0" }, @@ -6291,15 +4672,15 @@ } }, "node_modules/react-dom": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", - "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", "dependencies": { "loose-envify": "^1.1.0", - "scheduler": "^0.23.0" + "scheduler": "^0.23.2" }, "peerDependencies": { - "react": "^18.2.0" + "react": "^18.3.1" } }, "node_modules/react-fast-compare": { @@ -6308,9 +4689,9 @@ "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==" }, "node_modules/react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" }, "node_modules/react-transition-group": { "version": "4.4.5", @@ -6338,61 +4719,42 @@ "string_decoder": "~0.10.x" } }, - "node_modules/readable-stream/node_modules/isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": 
"sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" - }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "optional": true, + "node_modules/reflect.getprototypeof": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.6.tgz", + "integrity": "sha512-fmfw4XgoDke3kdI6h4xcUz1dG8uaiv5q9gcEwLS4Pnth2kxT+GZ7YehS1JTMGBQmtV7Y4GFGbs2re2NqhdozUg==", + "dev": true, "dependencies": { - "picomatch": "^2.2.1" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.1", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" }, "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" - }, - "node_modules/regenerate-unicode-properties": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", - "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", - "dependencies": { - "regenerate": "^1.4.2" + "node": ">= 0.4" }, - "engines": { - "node": ">=4" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/regenerator-runtime": { - "version": "0.13.11", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", - "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" - }, - "node_modules/regenerator-transform": { - "version": 
"0.15.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", - "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", - "dependencies": { - "@babel/runtime": "^7.8.4" - } + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" }, "node_modules/regexp.prototype.flags": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz", - "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==", + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz", + "integrity": "sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "functions-have-names": "^1.2.2" + "call-bind": "^1.0.6", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "set-function-name": "^2.0.1" }, "engines": { "node": ">= 0.4" @@ -6413,47 +4775,12 @@ "url": "https://github.com/sponsors/mysticatea" } }, - "node_modules/regexpu-core": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", - "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", - "dependencies": { - "@babel/regjsgen": "^0.8.0", - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsparser": "^0.9.1", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regjsparser": { - "version": "0.9.1", - "resolved": 
"https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", - "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", - "bin": { - "jsesc": "bin/jsesc" - } - }, "node_modules/resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", "dependencies": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -6472,6 +4799,15 @@ "node": ">=4" } }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -6494,6 +4830,7 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": 
true, "dependencies": { "glob": "^7.1.3" @@ -6528,41 +4865,79 @@ "queue-microtask": "^1.2.2" } }, - "node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/safe-regex-test": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz", - "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==", + "node_modules/safe-array-concat": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz", + "integrity": "sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-array-concat/node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safe-regex-test": { + 
"version": "1.0.3", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz", + "integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", "is-regex": "^1.1.4" }, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/scheduler": { - "version": "0.23.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", - "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", "dependencies": { "loose-envify": "^1.1.0" } }, "node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, "bin": { "semver": "bin/semver.js" }, @@ -6570,6 +4945,38 @@ "node": ">=10" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": 
"^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/shallowequal": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", @@ -6597,19 +5004,35 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dev": true, "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/simple-peer": { "version": "9.11.1", "resolved": "https://registry.npmjs.org/simple-peer/-/simple-peer-9.11.1.tgz", @@ 
-6651,25 +5074,6 @@ "node": ">= 6" } }, - "node_modules/simple-peer/node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, "node_modules/simple-peer/node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -6679,20 +5083,22 @@ } }, "node_modules/simplebar-core": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/simplebar-core/-/simplebar-core-1.2.1.tgz", - "integrity": "sha512-dgX6qDOSDM3+crxFstIKOlxyxnD3NYGRPm7CqmAtnNfFLVeadrydym5eNpduIO7aDFU/rayS6hEdrcxO0WBqnQ==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/simplebar-core/-/simplebar-core-1.2.5.tgz", + "integrity": "sha512-33AVCYXS8yavWId0GbE4TG1cYELsYybpCKWHJYuWEY/j6nccgz6zQdJ7nCqOpIGo7HgPPbkSSSIlJhi43fHP6A==", "dependencies": { + "@types/lodash-es": "^4.17.6", "can-use-dom": "^0.1.0", + "lodash": "^4.17.21", "lodash-es": "^4.17.21" } }, "node_modules/simplebar-react": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/simplebar-react/-/simplebar-react-3.2.1.tgz", - "integrity": "sha512-viHQs/34ZQk956b88Kl7+VckWO8DKOhg2Hkl8kjPv2q16w+/nnOJOGhZKrpj/egMGqDpQywou2AhdRkAM0oddA==", + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/simplebar-react/-/simplebar-react-3.2.5.tgz", + "integrity": "sha512-ZstHCBF1Is2Lj+Un8NUYSHVCmn8ufi25ylP9UH2bDnASa+V+M+6/thGhUZOZ7YNpFFHTNgVIID3FHdwRqNuqZA==", "dependencies": { - "simplebar-core": "^1.2.1" + "simplebar-core": "^1.2.5" }, "peerDependencies": { "react": ">=16.8.0" @@ -6708,13 +5114,13 @@ } }, 
"node_modules/socket.io-client": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-4.6.2.tgz", - "integrity": "sha512-OwWrMbbA8wSqhBAR0yoPK6EdQLERQAYjXb3A0zLpgxfM1ZGLKoxHx8gVmCHA6pcclRX5oA/zvQf7bghAS11jRA==", + "version": "4.7.5", + "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-4.7.5.tgz", + "integrity": "sha512-sJ/tqHOCe7Z50JCBCXrsY3I2k03iOiUe+tj1OmKeD2lXPiGH/RUCdTZFoqVyN7l1MnpIzPrGtLcijffmeouNlQ==", "dependencies": { "@socket.io/component-emitter": "~3.1.0", "debug": "~4.3.2", - "engine.io-client": "~6.4.0", + "engine.io-client": "~6.5.2", "socket.io-parser": "~4.2.4" }, "engines": { @@ -6742,25 +5148,13 @@ } }, "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", "engines": { "node": ">=0.10.0" } }, - "node_modules/stop-iteration-iterator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz", - "integrity": "sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==", - "dev": true, - "dependencies": { - "internal-slot": "^1.0.4" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/streamsearch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", @@ -6774,48 +5168,141 @@ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==" }, - 
"node_modules/string.prototype.matchall": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.8.tgz", - "integrity": "sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==", + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.11.tgz", + "integrity": "sha512-NUdh0aDavY2og7IbBPenWqR9exH+E26Sv8e0/eTe1tltDGZL+GtBkDAnnyBtmekfK6/Dq3MkcGtzXFEd1LQrtg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4" + "internal-slot": "^1.0.7", + "regexp.prototype.flags": "^1.5.2", + "set-function-name": "^2.0.2", + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz", + "integrity": "sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/string.prototype.trimend": { - "version": "1.0.6", - 
"resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz", + "integrity": "sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -6833,6 +5320,19 @@ "node": ">=8" } }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, 
"node_modules/strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", @@ -6855,26 +5355,19 @@ } }, "node_modules/styled-components": { - "version": "6.0.0-rc.3", - "resolved": "https://registry.npmjs.org/styled-components/-/styled-components-6.0.0-rc.3.tgz", - "integrity": "sha512-5FbCTxynopck99GRwM5Ey0+VRp8pkQq69TwGOJJeYtR7gPvwGjNx8yBPLN7/dfxwwvn9ymOZYB19eQkv2k70wQ==", + "version": "6.1.11", + "resolved": "https://registry.npmjs.org/styled-components/-/styled-components-6.1.11.tgz", + "integrity": "sha512-Ui0jXPzbp1phYij90h12ksljKGqF8ncGx+pjrNPsSPhbUUjWT2tD1FwGo2LF6USCnbrsIhNngDfodhxbegfEOA==", "dependencies": { - "@babel/cli": "^7.21.0", - "@babel/core": "^7.21.0", - "@babel/helper-module-imports": "^7.18.6", - "@babel/plugin-external-helpers": "^7.18.6", - "@babel/plugin-proposal-class-properties": "^7.18.6", - "@babel/plugin-proposal-object-rest-spread": "^7.20.7", - "@babel/preset-env": "^7.20.2", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.21.0", - "@babel/traverse": "^7.21.2", - "@emotion/unitless": "^0.8.0", - "css-to-react-native": "^3.2.0", - "postcss": "^8.4.23", - "shallowequal": "^1.1.0", - "stylis": "^4.2.0", - "tslib": "^2.5.0" + "@emotion/is-prop-valid": "1.2.2", + "@emotion/unitless": "0.8.1", + "@types/stylis": "4.2.5", + "css-to-react-native": "3.2.0", + "csstype": "3.1.3", + "postcss": "8.4.38", + "shallowequal": "1.1.0", + "stylis": "4.3.2", + "tslib": "2.6.2" }, "engines": { "node": ">= 16" @@ -6884,25 +5377,46 @@ "url": "https://opencollective.com/styled-components" }, "peerDependencies": { - "babel-plugin-styled-components": ">= 2", "react": ">= 16.8.0", "react-dom": ">= 16.8.0" - }, - "peerDependenciesMeta": { - "babel-plugin-styled-components": { - "optional": true + } + }, + "node_modules/styled-components/node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": 
"sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" } }, "node_modules/styled-components/node_modules/stylis": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", - "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==" + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.2.tgz", + "integrity": "sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==" }, "node_modules/styled-components/node_modules/tslib": { - "version": "2.5.3", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.3.tgz", - "integrity": "sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w==" + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" }, "node_modules/styled-jsx": { "version": "5.1.1", @@ -6932,15 +5446,14 @@ "integrity": "sha512-GP6WDNWf+o403jrEp9c5jibKavrtLW+/qYGhFxFrG8maXhwTBI7gLLhiBb0o7uFccWN+EOS9aMO6cGHWAO07OA==" }, "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, + "version": "5.5.0", + "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dependencies": { - "has-flag": "^4.0.0" + "has-flag": "^3.0.0" }, "engines": { - "node": ">=8" + "node": ">=4" } }, "node_modules/supports-preserve-symlinks-flag": { @@ -7037,28 +5550,6 @@ "node": ">= 0.8.0" } }, - "node_modules/synckit": { - "version": "0.8.5", - "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.8.5.tgz", - "integrity": "sha512-L1dapNV6vu2s/4Sputv8xGsCdAVlb5nRDMFU/E27D44l5U6cw1g0dGd45uLc+OXjNMmF4ntiMdCimzcjFKQI8Q==", - "dev": true, - "dependencies": { - "@pkgr/utils": "^2.3.1", - "tslib": "^2.5.0" - }, - "engines": { - "node": "^14.18.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/unts" - } - }, - "node_modules/synckit/node_modules/tslib": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==", - "dev": true - }, "node_modules/tapable": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", @@ -7093,16 +5584,6 @@ "resolved": "https://registry.npmjs.org/tiny-case/-/tiny-case-1.0.3.tgz", "integrity": "sha512-Eet/eeMhkO6TX8mnUteS9zgPbUMQa4I6Kkp5ORiBD5476/m+PIRiumP5tmh5ioJpH7k51Kehawy2UDfsnxxY8Q==" }, - "node_modules/tiny-glob": { - "version": "0.2.9", - "resolved": "https://registry.npmjs.org/tiny-glob/-/tiny-glob-0.2.9.tgz", - "integrity": "sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==", - "dev": true, - "dependencies": { - "globalyzer": "0.1.0", - "globrex": "^0.1.2" - } - }, "node_modules/tiny-warning": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", @@ -7120,7 +5601,7 @@ "version": "5.0.1", "resolved": 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "devOptional": true, + "dev": true, "dependencies": { "is-number": "^7.0.0" }, @@ -7134,13 +5615,13 @@ "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==" }, "node_modules/tsconfig-paths": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz", - "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==", + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", "dev": true, "dependencies": { "@types/json5": "^0.0.29", - "json5": "^1.0.1", + "json5": "^1.0.2", "minimist": "^1.2.6", "strip-bom": "^3.0.0" } @@ -7178,35 +5659,93 @@ } }, "node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", "engines": { - "node": ">=10" + "node": ">=12.20" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/typed-array-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", - "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==", + "node_modules/typed-array-buffer": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz", + "integrity": "sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz", + "integrity": "sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", "for-each": "^0.3.3", - "is-typed-array": "^1.1.9" + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz", + "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==", + "dev": true, + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.6.tgz", + "integrity": "sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13", + 
"possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/typescript": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", - "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", + "version": "5.4.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", + "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", "dev": true, "peer": true, "bin": { @@ -7232,71 +5771,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", - "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", - "engines": { - "node": ">=4" - } - }, - 
"node_modules/unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", - "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", - "engines": { - "node": ">=4" - } - }, - "node_modules/update-browserslist-db": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", - "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" - }, - "bin": { - "update-browserslist-db": "cli.js" - }, - "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -7311,18 +5785,6 @@ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, - "node_modules/watchpack": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -7354,33 +5816,67 @@ "url": "https://github.com/sponsors/ljharb" } }, - 
"node_modules/which-collection": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz", - "integrity": "sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==", + "node_modules/which-builtin-type": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.3.tgz", + "integrity": "sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==", "dev": true, "dependencies": { - "is-map": "^2.0.1", - "is-set": "^2.0.1", - "is-weakmap": "^2.0.1", - "is-weakset": "^2.0.1" + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type/node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + }, + "node_modules/which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/which-typed-array": { - "version": "1.1.9", - 
"resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.15.tgz", + "integrity": "sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==", "dev": true, "dependencies": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", "for-each": "^0.3.3", "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0", - "is-typed-array": "^1.1.10" + "has-tostringtag": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -7390,18 +5886,146 @@ } }, "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", "dev": true, "engines": { "node": ">=0.10.0" } }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true }, "node_modules/ws": { "version": "8.11.0", @@ -7447,12 +6071,6 @@ "resolved": 
"https://registry.npmjs.org/object-keys/-/object-keys-0.4.0.tgz", "integrity": "sha512-ncrLw+X55z7bkl5PnUvHwFK9FcGuFYo9gtjws2XtSzL+aZ8tm830P60WJ0dSmFVaSalWieW5MD7kEdnXda9yJw==" }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, "node_modules/yaml": { "version": "1.10.2", "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", @@ -7483,1484 +6101,202 @@ "toposort": "^2.0.2", "type-fest": "^2.19.0" } - }, - "node_modules/yup/node_modules/type-fest": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } } }, "dependencies": { - "@ampproject/remapping": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", - "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", - "requires": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" - } - }, - "@babel/cli": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/cli/-/cli-7.22.5.tgz", - "integrity": "sha512-N5d7MjzwsQ2wppwjhrsicVDhJSqF9labEP/swYiHhio4Ca2XjEehpgPmerjnLQl7BPE59BLud0PTWGYwqFl/cQ==", - "requires": { - "@jridgewell/trace-mapping": "^0.3.17", - "@nicolo-ribaudo/chokidar-2": "2.1.8-no-fsevents.3", - "chokidar": "^3.4.0", - "commander": "^4.0.1", - "convert-source-map": "^1.1.0", - "fs-readdir-recursive": "^1.1.0", - "glob": "^7.2.0", - "make-dir": "^2.1.0", - "slash": "^2.0.0" - }, - "dependencies": { - "glob": { - "version": "7.2.3", - "resolved": 
"https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "slash": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", - "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==" - } - } - }, "@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", "requires": { - "@babel/highlight": "^7.22.13", - "chalk": "^2.4.2" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - 
"requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "@babel/compat-data": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz", - "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==" - }, - "@babel/core": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz", - "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==", - "requires": { - "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.22.5", - "@babel/generator": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helpers": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.23.2", - "@babel/types": "^7.22.5", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - 
"gensync": "^1.0.0-beta.2", - "json5": "^2.2.2", - "semver": "^6.3.0" - }, - "dependencies": { - "json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==" - }, - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" - } + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" } }, "@babel/generator": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", - "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.7.tgz", + "integrity": "sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==", "requires": { - "@babel/types": "^7.23.0", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", + "@babel/types": "^7.24.7", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^2.5.1" } }, - "@babel/helper-annotate-as-pure": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", - "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", - "requires": { - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.5.tgz", - "integrity": 
"sha512-m1EP3lVOPptR+2DwD125gziZNcmoNSHGmJROKoy87loWUQyJaVXDgpmruWqDARZSmtYQ+Dl25okU8+qhVzuykw==", - "requires": { - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-compilation-targets": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz", - "integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==", - "requires": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "browserslist": "^4.21.3", - "lru-cache": "^5.1.1", - "semver": "^6.3.0" - }, - "dependencies": { - "lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "requires": { - "yallist": "^3.0.2" - } - }, - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" - }, - "yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - } - } - }, - "@babel/helper-create-class-features-plugin": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.5.tgz", - "integrity": "sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-member-expression-to-functions": "^7.22.5", - "@babel/helper-optimise-call-expression": "^7.22.5", - 
"@babel/helper-replace-supers": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" - } - } - }, - "@babel/helper-create-regexp-features-plugin": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.5.tgz", - "integrity": "sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "regexpu-core": "^5.3.1", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" - } - } - }, - "@babel/helper-define-polyfill-provider": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.0.tgz", - "integrity": "sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg==", - "requires": { - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - }, - "dependencies": { - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" - } - } - }, "@babel/helper-environment-visitor": { - "version": "7.22.20", - "resolved": 
"https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", - "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==" + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.7.tgz", + "integrity": "sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==", + "requires": { + "@babel/types": "^7.24.7" + } }, "@babel/helper-function-name": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", - "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.7.tgz", + "integrity": "sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==", "requires": { - "@babel/template": "^7.22.15", - "@babel/types": "^7.23.0" + "@babel/template": "^7.24.7", + "@babel/types": "^7.24.7" } }, "@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz", + "integrity": "sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==", "requires": { - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-member-expression-to-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.22.5.tgz", - "integrity": 
"sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ==", - "requires": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.24.7" } }, "@babel/helper-module-imports": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz", - "integrity": "sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-module-transforms": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz", - "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==", - "requires": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-module-imports": "^7.22.5", - "@babel/helper-simple-access": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "dependencies": { - "@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", - "requires": { - "@babel/types": "^7.22.5" - } - } - } - }, - "@babel/helper-optimise-call-expression": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", - "integrity": 
"sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", - "requires": { - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-plugin-utils": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", - "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==" - }, - "@babel/helper-remap-async-to-generator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.5.tgz", - "integrity": "sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-wrap-function": "^7.22.5", - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-replace-supers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.5.tgz", - "integrity": "sha512-aLdNM5I3kdI/V9xGNyKSF3X/gTyMUBohTZ+/3QdQKAA9vxIiy12E+8E2HoOP1/DjeqU+g6as35QHJNMDDYpuCg==", - "requires": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-member-expression-to-functions": "^7.22.5", - "@babel/helper-optimise-call-expression": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-simple-access": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", - "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", - "requires": { - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", - "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", - "requires": { - "@babel/types": "^7.22.5" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" } }, "@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.7.tgz", + "integrity": "sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==", "requires": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.24.7" } }, "@babel/helper-string-parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", - "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==" + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.7.tgz", + "integrity": "sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==" }, "@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==" - }, - "@babel/helper-validator-option": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz", - "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==" - }, - "@babel/helper-wrap-function": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.5.tgz", - "integrity": "sha512-bYqLIBSEshYcYQyfks8ewYA8S30yaGSeRslcvKMvoUk6HHPySbxHq9YRi6ghhzEU+yhQv9bP/jXnygkStOcqZw==", - "requires": { - "@babel/helper-function-name": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - } - }, - "@babel/helpers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz", - "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==", - "requires": { - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" - } + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==" }, "@babel/highlight": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", - "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", "requires": { - "@babel/helper-validator-identifier": "^7.22.20", + "@babel/helper-validator-identifier": "^7.24.7", "chalk": "^2.4.2", - "js-tokens": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" } }, "@babel/parser": { - "version": "7.23.0", - "resolved": 
"https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", - "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==" - }, - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.5.tgz", - "integrity": "sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.5.tgz", - "integrity": "sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/plugin-transform-optional-chaining": "^7.22.5" - } - }, - "@babel/plugin-external-helpers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-external-helpers/-/plugin-external-helpers-7.22.5.tgz", - "integrity": "sha512-ngnNEWxmykPk82mH4ajZT0qTztr3Je6hrMuKAslZVM8G1YZTENJSYwrIGtt6KOtznug3exmAtF4so/nPqJuA4A==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-proposal-class-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", - "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - 
"@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-proposal-object-rest-spread": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz", - "integrity": "sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg==", - "requires": { - "@babel/compat-data": "^7.20.5", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.20.7" - } - }, - "@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0-placeholder-for-preset-env.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", - "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", - "requires": {} - }, - "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", - "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "requires": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.3" - } - }, - "@babel/plugin-syntax-import-assertions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz", - "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-syntax-import-attributes": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz", - "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-syntax-import-meta": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", - "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-jsx": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", - "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - 
"integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": 
"sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-typescript": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", - "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-syntax-unicode-sets-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", - "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-arrow-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz", - "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-async-generator-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.5.tgz", - "integrity": 
"sha512-gGOEvFzm3fWoyD5uZq7vVTD57pPJ3PczPUD/xCFGjzBpUosnklmXyKnGQbbbGs1NPNPskFex0j93yKbHt0cHyg==", - "requires": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-remap-async-to-generator": "^7.22.5", - "@babel/plugin-syntax-async-generators": "^7.8.4" - } - }, - "@babel/plugin-transform-async-to-generator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz", - "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==", - "requires": { - "@babel/helper-module-imports": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-remap-async-to-generator": "^7.22.5" - }, - "dependencies": { - "@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", - "requires": { - "@babel/types": "^7.22.5" - } - } - } - }, - "@babel/plugin-transform-block-scoped-functions": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz", - "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-block-scoping": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.22.5.tgz", - "integrity": "sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-class-properties": { - 
"version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz", - "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-class-static-block": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.5.tgz", - "integrity": "sha512-SPToJ5eYZLxlnp1UzdARpOGeC2GbHvr9d/UV0EukuVx8atktg194oe+C5BqQ8jRTkgLRVOPYeXRSBg1IlMoVRA==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - } - }, - "@babel/plugin-transform-classes": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.5.tgz", - "integrity": "sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-optimise-call-expression": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-replace-supers": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "globals": "^11.1.0" - }, - "dependencies": { - "globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==" - } - } - }, - "@babel/plugin-transform-computed-properties": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz", - "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/template": "^7.22.5" - } - }, - "@babel/plugin-transform-destructuring": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.22.5.tgz", - "integrity": "sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-dotall-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz", - "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-duplicate-keys": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz", - "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-dynamic-import": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.5.tgz", - "integrity": "sha512-0MC3ppTB1AMxd8fXjSrbPa7LT9hrImt+/fcj+Pg5YMD7UQyWp/02+JWpdnCymmsXwIx5Z+sYn1bwCn4ZJNvhqQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - } - }, - 
"@babel/plugin-transform-exponentiation-operator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz", - "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==", - "requires": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-export-namespace-from": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.5.tgz", - "integrity": "sha512-X4hhm7FRnPgd4nDA4b/5V280xCx6oL7Oob5+9qVS5C13Zq4bh1qq7LU0GgRU6b5dBWBvhGaXYVB4AcN6+ol6vg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - } - }, - "@babel/plugin-transform-for-of": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.5.tgz", - "integrity": "sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-function-name": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz", - "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==", - "requires": { - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-json-strings": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.5.tgz", - "integrity": 
"sha512-DuCRB7fu8MyTLbEQd1ew3R85nx/88yMoqo2uPSjevMj3yoN7CDM8jkgrY0wmVxfJZyJ/B9fE1iq7EQppWQmR5A==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-json-strings": "^7.8.3" - } - }, - "@babel/plugin-transform-literals": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz", - "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-logical-assignment-operators": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.5.tgz", - "integrity": "sha512-MQQOUW1KL8X0cDWfbwYP+TbVbZm16QmQXJQ+vndPtH/BoO0lOKpVoEDMI7+PskYxH+IiE0tS8xZye0qr1lGzSA==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - } - }, - "@babel/plugin-transform-member-expression-literals": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz", - "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-modules-amd": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.22.5.tgz", - "integrity": "sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==", - "requires": { - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-modules-commonjs": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.22.5.tgz", - "integrity": "sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==", - "requires": { - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-simple-access": "^7.22.5" - } - }, - "@babel/plugin-transform-modules-systemjs": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.22.5.tgz", - "integrity": "sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==", - "requires": { - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5" - } - }, - "@babel/plugin-transform-modules-umd": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz", - "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==", - "requires": { - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", - "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-new-target": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz", - "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-nullish-coalescing-operator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.5.tgz", - "integrity": "sha512-6CF8g6z1dNYZ/VXok5uYkkBBICHZPiGEl7oDnAx2Mt1hlHVHOSIKWJaXHjQJA5VB43KZnXZDIexMchY4y2PGdA==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - } - }, - "@babel/plugin-transform-numeric-separator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.5.tgz", - "integrity": "sha512-NbslED1/6M+sXiwwtcAB/nieypGw02Ejf4KtDeMkCEpP6gWFMX1wI9WKYua+4oBneCCEmulOkRpwywypVZzs/g==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - } - }, - "@babel/plugin-transform-object-rest-spread": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.5.tgz", - "integrity": "sha512-Kk3lyDmEslH9DnvCDA1s1kkd3YWQITiBOHngOtDL9Pt6BZjzqb6hiOlb8VfjiiQJ2unmegBqZu0rx5RxJb5vmQ==", - "requires": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.22.5" - } - }, - "@babel/plugin-transform-object-super": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz", - "integrity": 
"sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-replace-supers": "^7.22.5" - } - }, - "@babel/plugin-transform-optional-catch-binding": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.5.tgz", - "integrity": "sha512-pH8orJahy+hzZje5b8e2QIlBWQvGpelS76C63Z+jhZKsmzfNaPQ+LaW6dcJ9bxTpo1mtXbgHwy765Ro3jftmUg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - } - }, - "@babel/plugin-transform-optional-chaining": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.22.5.tgz", - "integrity": "sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - } - }, - "@babel/plugin-transform-parameters": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.5.tgz", - "integrity": "sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-private-methods": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz", - "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==", - "requires": { - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - 
"@babel/plugin-transform-private-property-in-object": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.5.tgz", - "integrity": "sha512-/9xnaTTJcVoBtSSmrVyhtSvO3kbqS2ODoh2juEU72c3aYonNF0OMGiaz2gjukyKM2wBBYJP38S4JiE0Wfb5VMQ==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - } - }, - "@babel/plugin-transform-property-literals": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz", - "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-react-display-name": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz", - "integrity": "sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-react-jsx": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.5.tgz", - "integrity": "sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-module-imports": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-jsx": "^7.22.5", - "@babel/types": "^7.22.5" - }, - "dependencies": { - "@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", - "requires": { - "@babel/types": "^7.22.5" - } - } - } - }, - "@babel/plugin-transform-react-jsx-development": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", - "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", - "requires": { - "@babel/plugin-transform-react-jsx": "^7.22.5" - } - }, - "@babel/plugin-transform-react-pure-annotations": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz", - "integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-regenerator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.5.tgz", - "integrity": "sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "regenerator-transform": "^0.15.1" - } - }, - "@babel/plugin-transform-reserved-words": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz", - "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-shorthand-properties": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz", - "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-spread": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz", - "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" - } - }, - "@babel/plugin-transform-sticky-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz", - "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-template-literals": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz", - "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-typeof-symbol": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz", - "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-typescript": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.5.tgz", - "integrity": "sha512-SMubA9S7Cb5sGSFFUlqxyClTA9zWJ8qGQrppNUm05LtFuN1ELRFNndkix4zUJrC9F+YivWwa1dHMSyo0e0N9dA==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-create-class-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-typescript": "^7.22.5" - } - }, - "@babel/plugin-transform-unicode-escapes": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.5.tgz", - "integrity": "sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-unicode-property-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz", - "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-unicode-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz", - "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/plugin-transform-unicode-sets-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz", - "integrity": 
"sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==", - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" - } - }, - "@babel/preset-env": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.5.tgz", - "integrity": "sha512-fj06hw89dpiZzGZtxn+QybifF07nNiZjZ7sazs2aVDcysAZVGjW7+7iFYxg6GLNM47R/thYfLdrXc+2f11Vi9A==", - "requires": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.5", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.5", - "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.22.5", - "@babel/plugin-syntax-import-attributes": "^7.22.5", - "@babel/plugin-syntax-import-meta": "^7.10.4", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", - "@babel/plugin-transform-arrow-functions": "^7.22.5", - 
"@babel/plugin-transform-async-generator-functions": "^7.22.5", - "@babel/plugin-transform-async-to-generator": "^7.22.5", - "@babel/plugin-transform-block-scoped-functions": "^7.22.5", - "@babel/plugin-transform-block-scoping": "^7.22.5", - "@babel/plugin-transform-class-properties": "^7.22.5", - "@babel/plugin-transform-class-static-block": "^7.22.5", - "@babel/plugin-transform-classes": "^7.22.5", - "@babel/plugin-transform-computed-properties": "^7.22.5", - "@babel/plugin-transform-destructuring": "^7.22.5", - "@babel/plugin-transform-dotall-regex": "^7.22.5", - "@babel/plugin-transform-duplicate-keys": "^7.22.5", - "@babel/plugin-transform-dynamic-import": "^7.22.5", - "@babel/plugin-transform-exponentiation-operator": "^7.22.5", - "@babel/plugin-transform-export-namespace-from": "^7.22.5", - "@babel/plugin-transform-for-of": "^7.22.5", - "@babel/plugin-transform-function-name": "^7.22.5", - "@babel/plugin-transform-json-strings": "^7.22.5", - "@babel/plugin-transform-literals": "^7.22.5", - "@babel/plugin-transform-logical-assignment-operators": "^7.22.5", - "@babel/plugin-transform-member-expression-literals": "^7.22.5", - "@babel/plugin-transform-modules-amd": "^7.22.5", - "@babel/plugin-transform-modules-commonjs": "^7.22.5", - "@babel/plugin-transform-modules-systemjs": "^7.22.5", - "@babel/plugin-transform-modules-umd": "^7.22.5", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", - "@babel/plugin-transform-new-target": "^7.22.5", - "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.5", - "@babel/plugin-transform-numeric-separator": "^7.22.5", - "@babel/plugin-transform-object-rest-spread": "^7.22.5", - "@babel/plugin-transform-object-super": "^7.22.5", - "@babel/plugin-transform-optional-catch-binding": "^7.22.5", - "@babel/plugin-transform-optional-chaining": "^7.22.5", - "@babel/plugin-transform-parameters": "^7.22.5", - "@babel/plugin-transform-private-methods": "^7.22.5", - 
"@babel/plugin-transform-private-property-in-object": "^7.22.5", - "@babel/plugin-transform-property-literals": "^7.22.5", - "@babel/plugin-transform-regenerator": "^7.22.5", - "@babel/plugin-transform-reserved-words": "^7.22.5", - "@babel/plugin-transform-shorthand-properties": "^7.22.5", - "@babel/plugin-transform-spread": "^7.22.5", - "@babel/plugin-transform-sticky-regex": "^7.22.5", - "@babel/plugin-transform-template-literals": "^7.22.5", - "@babel/plugin-transform-typeof-symbol": "^7.22.5", - "@babel/plugin-transform-unicode-escapes": "^7.22.5", - "@babel/plugin-transform-unicode-property-regex": "^7.22.5", - "@babel/plugin-transform-unicode-regex": "^7.22.5", - "@babel/plugin-transform-unicode-sets-regex": "^7.22.5", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.22.5", - "babel-plugin-polyfill-corejs2": "^0.4.3", - "babel-plugin-polyfill-corejs3": "^0.8.1", - "babel-plugin-polyfill-regenerator": "^0.5.0", - "core-js-compat": "^3.30.2", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" - } - } - }, - "@babel/preset-modules": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", - "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", - "requires": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - } - }, - "@babel/preset-react": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.5.tgz", - "integrity": "sha512-M+Is3WikOpEJHgR385HbuCITPTaPRaNkibTEa9oiofmJvIsrceb4yp9RL9Kb+TE8LznmeyZqpP+Lopwcx59xPQ==", - 
"requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "@babel/plugin-transform-react-display-name": "^7.22.5", - "@babel/plugin-transform-react-jsx": "^7.22.5", - "@babel/plugin-transform-react-jsx-development": "^7.22.5", - "@babel/plugin-transform-react-pure-annotations": "^7.22.5" - } - }, - "@babel/preset-typescript": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.22.5.tgz", - "integrity": "sha512-YbPaal9LxztSGhmndR46FmAbkJ/1fAsw293tSU+I5E5h+cnJ3d4GTwyUgGYmOXJYdGA+uNePle4qbaRzj2NISQ==", - "requires": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "@babel/plugin-syntax-jsx": "^7.22.5", - "@babel/plugin-transform-modules-commonjs": "^7.22.5", - "@babel/plugin-transform-typescript": "^7.22.5" - } - }, - "@babel/regjsgen": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", - "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.7.tgz", + "integrity": "sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==" }, "@babel/runtime": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.0.tgz", - "integrity": "sha512-xwII0//EObnq89Ji5AKYQaRYiW/nZ3llSv29d49IuxPhKbtJoLP+9QUUZ4nVragQVtaVGeZrpB+ZtG/Pdy/POw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.7.tgz", + "integrity": "sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==", "requires": { - "regenerator-runtime": "^0.13.11" + "regenerator-runtime": "^0.14.0" } }, "@babel/template": { - "version": "7.22.15", - "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", - "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.7.tgz", + "integrity": "sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==", "requires": { - "@babel/code-frame": "^7.22.13", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15" + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7" } }, "@babel/traverse": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", - "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.7.tgz", + "integrity": "sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==", "requires": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.0", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.0", - "@babel/types": "^7.23.0", - "debug": "^4.1.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.24.7", + "@babel/helper-environment-visitor": "^7.24.7", + "@babel/helper-function-name": "^7.24.7", + "@babel/helper-hoist-variables": "^7.24.7", + "@babel/helper-split-export-declaration": "^7.24.7", + "@babel/parser": "^7.24.7", + "@babel/types": "^7.24.7", + "debug": "^4.3.1", "globals": "^11.1.0" - }, - "dependencies": { - "globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": 
"sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==" - } } }, "@babel/types": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", - "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.7.tgz", + "integrity": "sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==", "requires": { - "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.20", + "@babel/helper-string-parser": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", "to-fast-properties": "^2.0.0" } }, "@date-io/core": { - "version": "2.16.0", - "resolved": "https://registry.npmjs.org/@date-io/core/-/core-2.16.0.tgz", - "integrity": "sha512-DYmSzkr+jToahwWrsiRA2/pzMEtz9Bq1euJwoOuYwuwIYXnZFtHajY2E6a1VNVDc9jP8YUXK1BvnZH9mmT19Zg==" + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/core/-/core-2.17.0.tgz", + "integrity": "sha512-+EQE8xZhRM/hsY0CDTVyayMDDY5ihc4MqXCrPxooKw19yAzUIC6uUqsZeaOFNL9YKTNxYKrJP5DFgE8o5xRCOw==" }, "@date-io/date-fns": { - "version": "2.16.0", - "resolved": "https://registry.npmjs.org/@date-io/date-fns/-/date-fns-2.16.0.tgz", - "integrity": "sha512-bfm5FJjucqlrnQcXDVU5RD+nlGmL3iWgkHTq3uAZWVIuBu6dDmGa3m8a6zo2VQQpu8ambq9H22UyUpn7590joA==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/date-fns/-/date-fns-2.17.0.tgz", + "integrity": "sha512-L0hWZ/mTpy3Gx/xXJ5tq5CzHo0L7ry6KEO9/w/JWiFWFLZgiNVo3ex92gOl3zmzjHqY/3Ev+5sehAr8UnGLEng==", "requires": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" } }, "@date-io/dayjs": { - "version": "2.16.0", - "resolved": "https://registry.npmjs.org/@date-io/dayjs/-/dayjs-2.16.0.tgz", - "integrity": 
"sha512-y5qKyX2j/HG3zMvIxTobYZRGnd1FUW2olZLS0vTj7bEkBQkjd2RO7/FEwDY03Z1geVGlXKnzIATEVBVaGzV4Iw==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/dayjs/-/dayjs-2.17.0.tgz", + "integrity": "sha512-Iq1wjY5XzBh0lheFA0it6Dsyv94e8mTiNR8vuTai+KopxDkreL3YjwTmZHxkgB7/vd0RMIACStzVgWvPATnDCA==", "requires": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" } }, "@date-io/luxon": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/@date-io/luxon/-/luxon-2.16.1.tgz", - "integrity": "sha512-aeYp5K9PSHV28946pC+9UKUi/xMMYoaGelrpDibZSgHu2VWHXrr7zWLEr+pMPThSs5vt8Ei365PO+84pCm37WQ==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/luxon/-/luxon-2.17.0.tgz", + "integrity": "sha512-l712Vdm/uTddD2XWt9TlQloZUiTiRQtY5TCOG45MQ/8u0tu8M17BD6QYHar/3OrnkGybALAMPzCy1r5D7+0HBg==", "requires": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" } }, "@date-io/moment": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/@date-io/moment/-/moment-2.16.1.tgz", - "integrity": "sha512-JkxldQxUqZBfZtsaCcCMkm/dmytdyq5pS1RxshCQ4fHhsvP5A7gSqPD22QbVXMcJydi3d3v1Y8BQdUKEuGACZQ==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/@date-io/moment/-/moment-2.17.0.tgz", + "integrity": "sha512-e4nb4CDZU4k0WRVhz1Wvl7d+hFsedObSauDHKtZwU9kt7gdYEAzKgnrSCTHsEaXrDumdrkCYTeZ0Tmyk7uV4tw==", "requires": { - "@date-io/core": "^2.16.0" + "@date-io/core": "^2.17.0" } }, "@emotion/babel-plugin": { - "version": "11.10.6", - "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.10.6.tgz", - "integrity": "sha512-p2dAqtVrkhSa7xz1u/m9eHYdLi+en8NowrmXeF/dKtJpU8lCWli8RUAati7NcSl0afsBott48pdnANuD0wh9QQ==", + "version": "11.11.0", + "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.11.0.tgz", + "integrity": "sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==", "requires": { "@babel/helper-module-imports": "^7.16.7", 
"@babel/runtime": "^7.18.3", - "@emotion/hash": "^0.9.0", - "@emotion/memoize": "^0.8.0", - "@emotion/serialize": "^1.1.1", + "@emotion/hash": "^0.9.1", + "@emotion/memoize": "^0.8.1", + "@emotion/serialize": "^1.1.2", "babel-plugin-macros": "^3.1.0", "convert-source-map": "^1.5.0", "escape-string-regexp": "^4.0.0", "find-root": "^1.1.0", "source-map": "^0.5.7", - "stylis": "4.1.3" + "stylis": "4.2.0" + }, + "dependencies": { + "stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==" + } } }, "@emotion/cache": { @@ -8976,22 +6312,22 @@ } }, "@emotion/hash": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.0.tgz", - "integrity": "sha512-14FtKiHhy2QoPIzdTcvh//8OyBlknNs2nXRwIhG904opCby3l+9Xaf/wuPvICBF0rc1ZCNBd3nKe9cd2mecVkQ==" + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.1.tgz", + "integrity": "sha512-gJB6HLm5rYwSLI6PQa+X1t5CFGrv1J1TWG+sOyMCeKz2ojaj6Fnl/rZEspogG+cvqbt4AE/2eIyD2QfLKTBNlQ==" }, "@emotion/is-prop-valid": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.2.0.tgz", - "integrity": "sha512-3aDpDprjM0AwaxGE09bOPkNxHpBd+kA6jty3RnaEXdweX1DF1U3VQpPYb0g1IStAuK7SVQ1cy+bNBBKp4W3Fjg==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.2.2.tgz", + "integrity": "sha512-uNsoYd37AFmaCdXlg6EYD1KaPOaRWRByMCYzbKUX4+hhMfrxdVSelShywL4JVaAeM/eHUOSprYBQls+/neX3pw==", "requires": { - "@emotion/memoize": "^0.8.0" + "@emotion/memoize": "^0.8.1" } }, "@emotion/memoize": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.0.tgz", - "integrity": "sha512-G/YwXTkv7Den9mXDO7AhLWkE3q+I92B+VqAE+dYG4NGPaHZGvt3G8Q0p9vmE+sq7rTGphUbAvmQ9YpbfMQGGlA==" + "version": "0.8.1", + "resolved": 
"https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.1.tgz", + "integrity": "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==" }, "@emotion/react": { "version": "11.10.6", @@ -9009,14 +6345,14 @@ } }, "@emotion/serialize": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.1.1.tgz", - "integrity": "sha512-Zl/0LFggN7+L1liljxXdsVSVlg6E/Z/olVWpfxUTxOAmi8NU7YoeWeLfi1RmnB2TATHoaWwIBRoL+FvAJiTUQA==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.1.4.tgz", + "integrity": "sha512-RIN04MBT8g+FnDwgvIUi8czvr1LU1alUMI05LekWB5DGyTm8cCBMCRpq3GqaiyEDRptEXOyXnvZ58GZYu4kBxQ==", "requires": { - "@emotion/hash": "^0.9.0", - "@emotion/memoize": "^0.8.0", - "@emotion/unitless": "^0.8.0", - "@emotion/utils": "^1.2.0", + "@emotion/hash": "^0.9.1", + "@emotion/memoize": "^0.8.1", + "@emotion/unitless": "^0.8.1", + "@emotion/utils": "^1.2.1", "csstype": "^3.0.2" } }, @@ -9032,9 +6368,9 @@ } }, "@emotion/sheet": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.2.1.tgz", - "integrity": "sha512-zxRBwl93sHMsOj4zs+OslQKg/uhF38MB+OMKoCrVuS0nyTkqnau+BM3WGEoOptg9Oz45T/aIGs1qbVAsEFo3nA==" + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.2.2.tgz", + "integrity": "sha512-0QBtGvaqtWi+nx6doRwDdBIzhNdZrXUppvTM4dtZZWEGTXL/XE/yJxLMGlDT1Gt+UHH5IX1n+jkXyytE/av7OA==" }, "@emotion/styled": { "version": "11.10.6", @@ -9050,25 +6386,25 @@ } }, "@emotion/unitless": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.8.0.tgz", - "integrity": "sha512-VINS5vEYAscRl2ZUDiT3uMPlrFQupiKgHz5AA4bCH1miKBg4qtwkim1qPmJj/4WG6TreYMY111rEFsjupcOKHw==" + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.8.1.tgz", + "integrity": 
"sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==" }, "@emotion/use-insertion-effect-with-fallbacks": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.0.tgz", - "integrity": "sha512-1eEgUGmkaljiBnRMTdksDV1W4kUnmwgp7X9G8B++9GYwl1lUdqSndSriIrTJ0N7LQaoauY9JJ2yhiOYK5+NI4A==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.1.tgz", + "integrity": "sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw==", "requires": {} }, "@emotion/utils": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.2.0.tgz", - "integrity": "sha512-sn3WH53Kzpw8oQ5mgMmIzzyAaH2ZqFEbozVVBSYp538E06OSE6ytOp7pRAjNQR+Q/orwqdQYJSe2m3hCOeznkw==" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.2.1.tgz", + "integrity": "sha512-Y2tGf3I+XVnajdItskUCn6LX+VUDmP6lTL4fcqsXAv43dnlbZiuW4MWQW38rW/BVWSE7Q/7+XQocmpnRYILUmg==" }, "@emotion/weak-memoize": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.3.0.tgz", - "integrity": "sha512-AHPmaAx+RYfZz0eYu6Gviiagpmiyw98ySSlQvCUhVGDRtDFe4DBS0x1bSjdF3gqUDYOczB+yYvBTtEylYSdRhg==" + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.3.1.tgz", + "integrity": "sha512-EsBwpc7hBUJWAsNPBmJy4hxWx12v6bshQsldrVmjxJoc3isbxhOrF2IcCpaXxfvq03NwkI7sbsOLXbYuqF/8Ww==" }, "@eslint/eslintrc": { "version": "1.4.1", @@ -9085,6 +6421,23 @@ "js-yaml": "^4.1.0", "minimatch": "^3.1.2", "strip-json-comments": "^3.1.1" + }, + "dependencies": { + "globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": 
"sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "requires": { + "type-fest": "^0.20.2" + } + }, + "type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true + } } }, "@heroicons/react": { @@ -9094,13 +6447,13 @@ "requires": {} }, "@humanwhocodes/config-array": { - "version": "0.11.8", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", - "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", "dev": true, "requires": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", "minimatch": "^3.0.5" } }, @@ -9111,43 +6464,74 @@ "dev": true }, "@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", "dev": true }, - "@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + 
"@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, "requires": { - "@jridgewell/set-array": "^1.0.1", + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true + }, + "strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "requires": { + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" } }, "@jridgewell/resolve-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", - "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==" + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": 
"sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==" }, "@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==" }, "@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "@jridgewell/trace-mapping": { - "version": "0.3.18", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", - "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "requires": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, "@mui/base": { @@ -9166,9 +6550,9 @@ } }, "@mui/core-downloads-tracker": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-5.11.9.tgz", - "integrity": 
"sha512-YGEtucQ/Nl91VZkzYaLad47Cdui51n/hW+OQm4210g4N3/nZzBxmGeKfubEalf+ShKH4aYDS86XTO6q/TpZnjQ==" + "version": "5.15.20", + "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-5.15.20.tgz", + "integrity": "sha512-DoL2ppgldL16utL8nNyj/P12f8mCNdx/Hb/AJnX9rLY4b52hCMIx1kH83pbXQ6uMy6n54M3StmEbvSGoj2OFuA==" }, "@mui/lab": { "version": "5.0.0-alpha.120", @@ -9205,24 +6589,43 @@ } }, "@mui/private-theming": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/private-theming/-/private-theming-5.11.9.tgz", - "integrity": "sha512-XMyVIFGomVCmCm92EvYlgq3zrC9K+J6r7IKl/rBJT2/xVYoRY6uM7jeB+Wxh7kXxnW9Dbqsr2yL3cx6wSD1sAg==", + "version": "5.15.20", + "resolved": "https://registry.npmjs.org/@mui/private-theming/-/private-theming-5.15.20.tgz", + "integrity": "sha512-BK8F94AIqSrnaPYXf2KAOjGZJgWfvqAVQ2gVR3EryvQFtuBnG6RwodxrCvd3B48VuMy6Wsk897+lQMUxJyk+6g==", "requires": { - "@babel/runtime": "^7.20.13", - "@mui/utils": "^5.11.9", + "@babel/runtime": "^7.23.9", + "@mui/utils": "^5.15.20", "prop-types": "^15.8.1" } }, "@mui/styled-engine": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-5.11.9.tgz", - "integrity": "sha512-bkh2CjHKOMy98HyOc8wQXEZvhOmDa/bhxMUekFX5IG0/w4f5HJ8R6+K6nakUUYNEgjOWPYzNPrvGB8EcGbhahQ==", + "version": "5.15.14", + "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-5.15.14.tgz", + "integrity": "sha512-RILkuVD8gY6PvjZjqnWhz8fu68dVkqhM5+jYWfB5yhlSQKg+2rHkmEwm75XIeAqI3qwOndK6zELK5H6Zxn4NHw==", "requires": { - "@babel/runtime": "^7.20.13", - "@emotion/cache": "^11.10.5", - "csstype": "^3.1.1", + "@babel/runtime": "^7.23.9", + "@emotion/cache": "^11.11.0", + "csstype": "^3.1.3", "prop-types": "^15.8.1" + }, + "dependencies": { + "@emotion/cache": { + "version": "11.11.0", + "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.11.0.tgz", + "integrity": 
"sha512-P34z9ssTCBi3e9EI1ZsWpNHcfY1r09ZO0rZbRO2ob3ZQMnFI35jB536qoXbkdesr5EUhYi22anuEJuyxifaqAQ==", + "requires": { + "@emotion/memoize": "^0.8.1", + "@emotion/sheet": "^1.2.2", + "@emotion/utils": "^1.2.1", + "@emotion/weak-memoize": "^0.3.1", + "stylis": "4.2.0" + } + }, + "stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==" + } } }, "@mui/system": { @@ -9241,19 +6644,18 @@ } }, "@mui/types": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/@mui/types/-/types-7.2.3.tgz", - "integrity": "sha512-tZ+CQggbe9Ol7e/Fs5RcKwg/woU+o8DCtOnccX6KmbBc7YrfqMYEYuaIcXHuhpT880QwNkZZ3wQwvtlDFA2yOw==", + "version": "7.2.14", + "resolved": "https://registry.npmjs.org/@mui/types/-/types-7.2.14.tgz", + "integrity": "sha512-MZsBZ4q4HfzBsywtXgM1Ksj6HDThtiwmOKUXH1pKYISI9gAVXCNHNpo7TlGoGrBaYWZTdNoirIN7JsQcQUjmQQ==", "requires": {} }, "@mui/utils": { - "version": "5.11.9", - "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-5.11.9.tgz", - "integrity": "sha512-eOJaqzcEs4qEwolcvFAmXGpln+uvouvOS9FUX6Wkrte+4I8rZbjODOBDVNlK+V6/ziTfD4iNKC0G+KfOTApbqg==", + "version": "5.15.20", + "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-5.15.20.tgz", + "integrity": "sha512-mAbYx0sovrnpAu1zHc3MDIhPqL8RPVC5W5xcO1b7PiSCJPtckIZmBkp8hefamAvUiAV8gpfMOM6Zb+eSisbI2A==", "requires": { - "@babel/runtime": "^7.20.13", - "@types/prop-types": "^15.7.5", - "@types/react-is": "^16.7.1 || ^17.0.0", + "@babel/runtime": "^7.23.9", + "@types/prop-types": "^15.7.11", "prop-types": "^15.8.1", "react-is": "^18.2.0" } @@ -9278,77 +6680,104 @@ } }, "@next/env": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/env/-/env-13.5.6.tgz", - "integrity": "sha512-Yac/bV5sBGkkEXmAX5FWPS9Mmo2rthrOPRQQNfycJPkjUAUclomCPH7QFVCDQ4Mp2k2K1SSM6m0zrxYrOwtFQw==" + "version": "14.2.4", + "resolved": 
"https://registry.npmjs.org/@next/env/-/env-14.2.4.tgz", + "integrity": "sha512-3EtkY5VDkuV2+lNmKlbkibIJxcO4oIHEhBWne6PaAp+76J9KoSsGvNikp6ivzAT8dhhBMYrm6op2pS1ApG0Hzg==" }, "@next/eslint-plugin-next": { - "version": "13.1.6", - "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-13.1.6.tgz", - "integrity": "sha512-o7cauUYsXjzSJkay8wKjpKJf2uLzlggCsGUkPu3lP09Pv97jYlekTC20KJrjQKmSv5DXV0R/uks2ZXhqjNkqAw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.4.tgz", + "integrity": "sha512-svSFxW9f3xDaZA3idQmlFw7SusOuWTpDTAeBlO3AEPDltrraV+lqs7mAc6A27YdnpQVVIA3sODqUAAHdWhVWsA==", "dev": true, "requires": { - "glob": "7.1.7" + "glob": "10.3.10" + }, + "dependencies": { + "brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0" + } + }, + "glob": { + "version": "10.3.10", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", + "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", + "dev": true, + "requires": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.5", + "minimatch": "^9.0.1", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", + "path-scurry": "^1.10.1" + } + }, + "minimatch": { + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz", + "integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==", + "dev": true, + "requires": { + "brace-expansion": "^2.0.1" + } + } } }, "@next/swc-darwin-arm64": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.5.6.tgz", - "integrity": 
"sha512-5nvXMzKtZfvcu4BhtV0KH1oGv4XEW+B+jOfmBdpFI3C7FrB/MfujRpWYSBBO64+qbW8pkZiSyQv9eiwnn5VIQA==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.4.tgz", + "integrity": "sha512-AH3mO4JlFUqsYcwFUHb1wAKlebHU/Hv2u2kb1pAuRanDZ7pD/A/KPD98RHZmwsJpdHQwfEc/06mgpSzwrJYnNg==", "optional": true }, "@next/swc-darwin-x64": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.5.6.tgz", - "integrity": "sha512-6cgBfxg98oOCSr4BckWjLLgiVwlL3vlLj8hXg2b+nDgm4bC/qVXXLfpLB9FHdoDu4057hzywbxKvmYGmi7yUzA==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.4.tgz", + "integrity": "sha512-QVadW73sWIO6E2VroyUjuAxhWLZWEpiFqHdZdoQ/AMpN9YWGuHV8t2rChr0ahy+irKX5mlDU7OY68k3n4tAZTg==", "optional": true }, "@next/swc-linux-arm64-gnu": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.5.6.tgz", - "integrity": "sha512-txagBbj1e1w47YQjcKgSU4rRVQ7uF29YpnlHV5xuVUsgCUf2FmyfJ3CPjZUvpIeXCJAoMCFAoGnbtX86BK7+sg==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.4.tgz", + "integrity": "sha512-KT6GUrb3oyCfcfJ+WliXuJnD6pCpZiosx2X3k66HLR+DMoilRb76LpWPGb4tZprawTtcnyrv75ElD6VncVamUQ==", "optional": true }, "@next/swc-linux-arm64-musl": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.5.6.tgz", - "integrity": "sha512-cGd+H8amifT86ZldVJtAKDxUqeFyLWW+v2NlBULnLAdWsiuuN8TuhVBt8ZNpCqcAuoruoSWynvMWixTFcroq+Q==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.4.tgz", + "integrity": "sha512-Alv8/XGSs/ytwQcbCHwze1HmiIkIVhDHYLjczSVrf0Wi2MvKn/blt7+S6FJitj3yTlMwMxII1gIJ9WepI4aZ/A==", "optional": true }, "@next/swc-linux-x64-gnu": { - "version": "13.5.6", - "resolved": 
"https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.5.6.tgz", - "integrity": "sha512-Mc2b4xiIWKXIhBy2NBTwOxGD3nHLmq4keFk+d4/WL5fMsB8XdJRdtUlL87SqVCTSaf1BRuQQf1HvXZcy+rq3Nw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.4.tgz", + "integrity": "sha512-ze0ShQDBPCqxLImzw4sCdfnB3lRmN3qGMB2GWDRlq5Wqy4G36pxtNOo2usu/Nm9+V2Rh/QQnrRc2l94kYFXO6Q==", "optional": true }, "@next/swc-linux-x64-musl": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.5.6.tgz", - "integrity": "sha512-CFHvP9Qz98NruJiUnCe61O6GveKKHpJLloXbDSWRhqhkJdZD2zU5hG+gtVJR//tyW897izuHpM6Gtf6+sNgJPQ==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.4.tgz", + "integrity": "sha512-8dwC0UJoc6fC7PX70csdaznVMNr16hQrTDAMPvLPloazlcaWfdPogq+UpZX6Drqb1OBlwowz8iG7WR0Tzk/diQ==", "optional": true }, "@next/swc-win32-arm64-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.5.6.tgz", - "integrity": "sha512-aFv1ejfkbS7PUa1qVPwzDHjQWQtknzAZWGTKYIAaS4NMtBlk3VyA6AYn593pqNanlicewqyl2jUhQAaFV/qXsg==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.4.tgz", + "integrity": "sha512-jxyg67NbEWkDyvM+O8UDbPAyYRZqGLQDTPwvrBBeOSyVWW/jFQkQKQ70JDqDSYg1ZDdl+E3nkbFbq8xM8E9x8A==", "optional": true }, "@next/swc-win32-ia32-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.5.6.tgz", - "integrity": "sha512-XqqpHgEIlBHvzwG8sp/JXMFkLAfGLqkbVsyN+/Ih1mR8INb6YCc2x/Mbwi6hsAgUnqQztz8cvEbHJUbSl7RHDg==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.4.tgz", + "integrity": 
"sha512-twrmN753hjXRdcrZmZttb/m5xaCBFa48Dt3FbeEItpJArxriYDunWxJn+QFXdJ3hPkm4u7CKxncVvnmgQMY1ag==", "optional": true }, "@next/swc-win32-x64-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.5.6.tgz", - "integrity": "sha512-Cqfe1YmOS7k+5mGu92nl5ULkzpKuxJrP3+4AEuPmrpFZ3BHxTY3TnHmU1On3bFmFFs6FbTcdF58CCUProGpIGQ==", - "optional": true - }, - "@nicolo-ribaudo/chokidar-2": { - "version": "2.1.8-no-fsevents.3", - "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz", - "integrity": "sha512-s88O1aVtXftvp5bCPB7WnmXc5IwOZZ7YPuwNPt+GtOOXpPvad1LfbmjYv+qII7zP6RU2QGnqve27dnLycEnyEQ==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.4.tgz", + "integrity": "sha512-tkLrjBzqFTP8DVrAAQmZelEahfR9OxWpFR++vAI9FBhCiIxtwHwBHC23SBHCTURBtwB4kc/x44imVOnkKGNVGg==", "optional": true }, "@nodelib/fs.scandir": { @@ -9377,56 +6806,47 @@ "fastq": "^1.6.0" } }, - "@pkgr/utils": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@pkgr/utils/-/utils-2.3.1.tgz", - "integrity": "sha512-wfzX8kc1PMyUILA+1Z/EqoE4UCXGy0iRGMhPwdfae1+f0OXlLqCk+By+aMzgJBzR9AzS4CDizioG6Ss1gvAFJw==", + "@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", "dev": true, - "requires": { - "cross-spawn": "^7.0.3", - "is-glob": "^4.0.3", - "open": "^8.4.0", - "picocolors": "^1.0.0", - "tiny-glob": "^0.2.9", - "tslib": "^2.4.0" - }, - "dependencies": { - "tslib": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==", - "dev": true - } - } + "optional": true }, "@popperjs/core": { - "version": 
"2.11.6", - "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.6.tgz", - "integrity": "sha512-50/17A98tWUfQ176raKiOGXuYpLyyVMkxxG6oylzL3BPOlA6ADGdK7EYunSa4I064xerltq9TGXs8HmOk5E+vw==" + "version": "2.11.8", + "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz", + "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==" }, "@rushstack/eslint-patch": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.2.0.tgz", - "integrity": "sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg==", + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.10.3.tgz", + "integrity": "sha512-qC/xYId4NMebE6w/V33Fh9gWxLgURiNYgVNObbJl2LZv0GUUItCcCqC5axQSwRaAgaxl2mELq1rMzlswaQ0Zxg==", "dev": true }, "@socket.io/component-emitter": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz", - "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==" + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", + "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==" + }, + "@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" }, "@swc/helpers": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", - "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", 
+ "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", "requires": { + "@swc/counter": "^0.1.3", "tslib": "^2.4.0" }, "dependencies": { "tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" } } }, @@ -9436,6 +6856,19 @@ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", "dev": true }, + "@types/lodash": { + "version": "4.17.5", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.5.tgz", + "integrity": "sha512-MBIOHVZqVqgfro1euRDWX7OO0fBVUUMrN6Pwm8LQsz8cWhEpihlvR70ENj3f40j58TNxZaWv2ndSkInykNBBJw==" + }, + "@types/lodash-es": { + "version": "4.17.12", + "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.12.tgz", + "integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==", + "requires": { + "@types/lodash": "*" + } + }, "@types/node": { "version": "18.13.0", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", @@ -9455,14 +6888,14 @@ "dev": true }, "@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==" + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" }, "@types/prop-types": { - "version": "15.7.5", - "resolved": 
"https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" }, "@types/react": { "version": "18.0.28", @@ -9483,63 +6916,60 @@ "@types/react": "*" } }, - "@types/react-is": { - "version": "17.0.3", - "resolved": "https://registry.npmjs.org/@types/react-is/-/react-is-17.0.3.tgz", - "integrity": "sha512-aBTIWg1emtu95bLTLx0cpkxwGW3ueZv71nE2YFBpL8k/z5czEW8yYpOo8Dp+UUAFAtKwNaOsh/ioSeQnWlZcfw==", - "requires": { - "@types/react": "*" - } - }, "@types/react-transition-group": { - "version": "4.4.5", - "resolved": "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.5.tgz", - "integrity": "sha512-juKD/eiSM3/xZYzjuzH6ZwpP+/lejltmiS3QEzV/vmb/Q8+HfDmxu+Baga8UEMGBqV88Nbg4l2hY/K2DkyaLLA==", + "version": "4.4.10", + "resolved": "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.10.tgz", + "integrity": "sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==", "requires": { "@types/react": "*" } }, "@types/scheduler": { - "version": "0.16.2", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz", - "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==" + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-YIoDCTH3Af6XM5VuwGG/QL/CJqga1Zm3NkU3HZ4ZHK2fRMPYP1VczsTUqtsf43PH/iJNVlPHAo2oWX7BSdB2Hw==" + }, + "@types/stylis": { + "version": "4.2.5", + "resolved": "https://registry.npmjs.org/@types/stylis/-/stylis-4.2.5.tgz", + "integrity": 
"sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw==" }, "@typescript-eslint/parser": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.53.0.tgz", - "integrity": "sha512-MKBw9i0DLYlmdOb3Oq/526+al20AJZpANdT6Ct9ffxcV8nKCHz63t/S0IhlTFNsBIHJv+GY5SFJ0XfqVeydQrQ==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz", + "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", "dev": true, "requires": { - "@typescript-eslint/scope-manager": "5.53.0", - "@typescript-eslint/types": "5.53.0", - "@typescript-eslint/typescript-estree": "5.53.0", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", "debug": "^4.3.4" } }, "@typescript-eslint/scope-manager": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.53.0.tgz", - "integrity": "sha512-Opy3dqNsp/9kBBeCPhkCNR7fmdSQqA+47r21hr9a14Bx0xnkElEQmhoHga+VoaoQ6uDHjDKmQPIYcUcKJifS7w==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", + "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", "dev": true, "requires": { - "@typescript-eslint/types": "5.53.0", - "@typescript-eslint/visitor-keys": "5.53.0" + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" } }, "@typescript-eslint/types": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.53.0.tgz", - "integrity": "sha512-5kcDL9ZUIP756K6+QOAfPkigJmCPHcLN7Zjdz76lQWWDdzfOhZDTj1irs6gPBKiXx5/6O3L0+AvupAut3z7D2A==", + "version": "5.62.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz", + "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", "dev": true }, "@typescript-eslint/typescript-estree": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.53.0.tgz", - "integrity": "sha512-eKmipH7QyScpHSkhbptBBYh9v8FxtngLquq292YTEQ1pxVs39yFBlLC1xeIZcPPz1RWGqb7YgERJRGkjw8ZV7w==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", + "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", "dev": true, "requires": { - "@typescript-eslint/types": "5.53.0", - "@typescript-eslint/visitor-keys": "5.53.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -9548,19 +6978,19 @@ } }, "@typescript-eslint/visitor-keys": { - "version": "5.53.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.53.0.tgz", - "integrity": "sha512-JqNLnX3leaHFZEN0gCh81sIvgrp/2GOACZNgO4+Tkf64u51kTpAyWFOY8XHx8XuXr3N2C9zgPPHtcpMg6z1g0w==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", + "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", "dev": true, "requires": { - "@typescript-eslint/types": "5.53.0", + "@typescript-eslint/types": "5.62.0", "eslint-visitor-keys": "^3.3.0" } }, "acorn": { - "version": "8.8.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", - "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", 
+ "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "dev": true }, "acorn-jsx": { @@ -9589,22 +7019,11 @@ "dev": true }, "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "requires": { - "color-convert": "^2.0.1" - } - }, - "anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "optional": true, - "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" + "color-convert": "^1.9.0" } }, "apexcharts": { @@ -9627,24 +7046,35 @@ "dev": true }, "aria-query": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.1.3.tgz", - "integrity": "sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", "dev": true, "requires": { - "deep-equal": "^2.0.5" + "dequal": "^2.0.3" + } + }, + "array-buffer-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz", + "integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==", + "dev": true, + "requires": { + "call-bind": "^1.0.5", + "is-array-buffer": "^3.0.4" } }, 
"array-includes": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", - "integrity": "sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz", + "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", "is-string": "^1.0.7" } }, @@ -9654,68 +7084,127 @@ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", "dev": true }, + "array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + } + }, + "array.prototype.findlastindex": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz", + "integrity": "sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + } + }, "array.prototype.flat": { - "version": "1.3.1", - "resolved": 
"https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz", + "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==", "dev": true, "requires": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" } }, "array.prototype.flatmap": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz", - "integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz", + "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==", "dev": true, "requires": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + } + }, + "array.prototype.toreversed": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/array.prototype.toreversed/-/array.prototype.toreversed-1.1.2.tgz", + "integrity": "sha512-wwDCoT4Ck4Cz7sLtgUmzR5UV3YF5mFHUlbChCzZBQZ+0m2cl/DH3tKgvphv1nKgFsJ48oCSg6p91q2Vm0I/ZMA==", + "dev": true, + "requires": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" } }, "array.prototype.tosorted": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.1.tgz", - "integrity": 
"sha512-pZYPXPRl2PqWcsUs6LOMn+1f1532nEoPTYowBtqLwAW+W8vSVhkIGnmOX1t/UQjD6YGI0vcD2B1U7ZFGQH9jnQ==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0", - "get-intrinsic": "^1.1.3" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + } + }, + "arraybuffer.prototype.slice": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz", + "integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==", + "dev": true, + "requires": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.2.1", + "get-intrinsic": "^1.2.3", + "is-array-buffer": "^3.0.4", + "is-shared-array-buffer": "^1.0.2" } }, "ast-types-flow": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz", - "integrity": "sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==", + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", + "integrity": "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", "dev": true }, "available-typed-arrays": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", - "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==", - "dev": true + 
"version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "requires": { + "possible-typed-array-names": "^1.0.0" + } }, "axe-core": { - "version": "4.6.3", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.6.3.tgz", - "integrity": "sha512-/BQzOX780JhsxDnPpH4ZiyrJAzcd8AfzFPkv+89veFSr1rcMjuq2JDCwypKaPeB6ljHp9KjXhPpjgCvQlWYuqg==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.7.0.tgz", + "integrity": "sha512-M0JtH+hlOL5pLQwHOLNYZaXuhqmvS8oExsqB1SBYgA4Dk7u/xx+YdGHXaK5pyUfed5mYXdlYiphWq3G8cRi5JQ==", "dev": true }, "axobject-query": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.1.1.tgz", - "integrity": "sha512-goKlv8DZrK9hUh975fnHzhNIO4jUnFCfv/dszV5VwUGDFjI6vQ2VwoyjYjYNEbBE8AH87TduWP5uyDR1D+Iteg==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.2.1.tgz", + "integrity": "sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==", "dev": true, "requires": { - "deep-equal": "^2.0.5" + "dequal": "^2.0.3" } }, "babel-plugin-macros": { @@ -9728,90 +7217,34 @@ "resolve": "^1.19.0" } }, - "babel-plugin-polyfill-corejs2": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.3.tgz", - "integrity": "sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw==", - "requires": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-define-polyfill-provider": "^0.4.0", - "semver": "^6.1.1" - }, - "dependencies": { - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" - } - } - }, - "babel-plugin-polyfill-corejs3": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.1.tgz", - "integrity": "sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q==", - "requires": { - "@babel/helper-define-polyfill-provider": "^0.4.0", - "core-js-compat": "^3.30.1" - } - }, - "babel-plugin-polyfill-regenerator": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.0.tgz", - "integrity": "sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g==", - "requires": { - "@babel/helper-define-polyfill-provider": "^0.4.0" - } - }, "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true }, "base64-js": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" }, - "binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "optional": true - }, "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + 
"dev": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "devOptional": true, + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, "requires": { - "fill-range": "^7.0.1" - } - }, - "browserslist": { - "version": "4.21.9", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.9.tgz", - "integrity": "sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==", - "requires": { - "caniuse-lite": "^1.0.30001503", - "electron-to-chromium": "^1.4.431", - "node-releases": "^2.0.12", - "update-browserslist-db": "^1.0.11" - }, - "dependencies": { - "caniuse-lite": { - "version": "1.0.30001504", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001504.tgz", - "integrity": "sha512-5uo7eoOp2mKbWyfMXnGO9rJWOGU8duvzEiYITW+wivukL7yHH4gX9yuRaobu6El4jPxo6jKZfG+N6fB621GD/Q==" - } + "fill-range": "^7.1.1" } }, "buffer": { @@ -9837,13 +7270,16 @@ } }, "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dev": true, "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + 
"set-function-length": "^1.2.1" } }, "callsites": { @@ -9862,44 +7298,24 @@ "integrity": "sha512-ceOhN1DL7Y4O6M0j9ICgmTYziV89WMd96SvSl0REd8PMgrY0B/WBOPoed5S1KUmJqXgUXh8gzSe6E3ae27upsQ==" }, "caniuse-lite": { - "version": "1.0.30001457", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001457.tgz", - "integrity": "sha512-SDIV6bgE1aVbK6XyxdURbUE89zY7+k1BBBaOwYwkNCglXlel/E7mELiHC64HQ+W0xSKlqWhV9Wh7iHxUjMs4fA==" + "version": "1.0.30001633", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001633.tgz", + "integrity": "sha512-6sT0yf/z5jqf8tISAgpJDrmwOpLsrpnyCdD/lOZKvKkkJK4Dn0X5i7KF7THEZhOq+30bmhwBlNEaqPUiHiKtZg==" }, "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", - "optional": true, - "requires": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "fsevents": "~2.3.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" }, "dependencies": { - "glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", 
- "optional": true, - "requires": { - "is-glob": "^4.0.1" - } + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" } } }, @@ -9914,43 +7330,29 @@ "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==" }, "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "requires": { - "color-name": "~1.1.4" + "color-name": "1.1.3" } }, "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==" + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" }, "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + "integrity": 
"sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true }, "convert-source-map": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==" }, - "core-js-compat": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.31.0.tgz", - "integrity": "sha512-hM7YCu1cU6Opx7MXNu0NuumM0ezNeAeRKadixyiQELWY3vT3De9S4J5ZBMraWV2vZnrE1Cirl0GtFtDtMUXzPw==", - "requires": { - "browserslist": "^4.21.5" - } - }, "core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", @@ -9995,9 +7397,9 @@ } }, "csstype": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.1.tgz", - "integrity": "sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, "damerau-levenshtein": { "version": "1.0.8", @@ -10005,44 +7407,52 @@ "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", "dev": true }, + "data-view-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.1.tgz", + "integrity": "sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==", + "dev": true, + "requires": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + } + }, + "data-view-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz", + "integrity": 
"sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + } + }, + "data-view-byte-offset": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz", + "integrity": "sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==", + "dev": true, + "requires": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + } + }, "date-fns": { "version": "2.29.3", "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.29.3.tgz", "integrity": "sha512-dDCnyH2WnnKusqvZZ6+jA1O51Ibt8ZMRNkDZdyAyK4YfbDwa/cEmuztzG5pk6hqlp9aSBPYcjOlktquahGwGeA==" }, "debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", + "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", "requires": { "ms": "2.1.2" } }, - "deep-equal": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-2.2.0.tgz", - "integrity": "sha512-RdpzE0Hv4lhowpIUKKMJfeH6C1pXdtT1/it80ubgWqwI3qpuxUBpC1S4hnHg+zjnuOoDkzUtUCEEkG+XG5l3Mw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "es-get-iterator": "^1.1.2", - "get-intrinsic": "^1.1.3", - "is-arguments": "^1.1.1", - "is-array-buffer": "^3.0.1", - "is-date-object": "^1.0.5", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "isarray": "^2.0.5", - "object-is": "^1.1.5", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4", - "which-boxed-primitive": "^1.0.2", - "which-collection": 
"^1.0.1", - "which-typed-array": "^1.1.9" - } - }, "deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -10054,22 +7464,34 @@ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz", "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==" }, - "define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", - "dev": true - }, - "define-properties": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz", - "integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==", + "define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", "dev": true, "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, + "define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "requires": { + "define-data-property": "^1.0.1", "has-property-descriptors": "^1.0.0", "object-keys": "^1.1.1" } }, + "dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "dev": true + }, "dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ 
-10111,9 +7533,9 @@ "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" }, "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -10124,6 +7546,11 @@ "util-deprecate": "~1.0.1" } }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, "string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", @@ -10134,10 +7561,11 @@ } } }, - "electron-to-chromium": { - "version": "1.4.433", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.433.tgz", - "integrity": "sha512-MGO1k0w1RgrfdbLVwmXcDhHHuxCn2qRgR7dYsJvWFKDttvYPx6FNzCGG0c/fBBvzK2LDh3UV7Tt9awnHnvAAUQ==" + "eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true }, "emoji-regex": { "version": "9.2.2", @@ -10146,26 +7574,26 @@ "dev": true }, "engine.io-client": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.4.0.tgz", - "integrity": "sha512-GyKPDyoEha+XZ7iEqam49vz6auPnNJ9ZBfy89f+rMMas8AuiMWOZ9PVzu8xb9ZC6rafUqiGHSCfu22ih66E+1g==", + "version": "6.5.3", + 
"resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.5.3.tgz", + "integrity": "sha512-9Z0qLB0NIisTRt1DZ/8U2k12RJn8yls/nXMZLn+/N8hANT3TcYjKFKcwbw5zFQiN4NTde3TSY9zb79e1ij6j9Q==", "requires": { "@socket.io/component-emitter": "~3.1.0", "debug": "~4.3.1", - "engine.io-parser": "~5.0.3", + "engine.io-parser": "~5.2.1", "ws": "~8.11.0", "xmlhttprequest-ssl": "~2.0.0" } }, "engine.io-parser": { - "version": "5.0.7", - "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.7.tgz", - "integrity": "sha512-P+jDFbvK6lE3n1OL+q9KuzdOFWkkZ/cMV9gol/SbVfpyqfvrfrFTOFJ6fQm2VC3PZHlU3QPhVwmbsCnauHF2MQ==" + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.2.2.tgz", + "integrity": "sha512-RcyUFKA93/CXH20l4SoVvzZfrSDMOTUS3bWVpTt2FuFP+XYrL8i8oonHP7WInRyVHXh0n/ORtoeiE1os+8qkSw==" }, "enhanced-resolve": { - "version": "5.12.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.12.0.tgz", - "integrity": "sha512-QHTXI/sZQmko1cbDoNAa3mJ5qhWUUNAq3vR0/YiD379fWQrcfuoX1+HW2S0MTt7XmoPLapdaDKUtelUSPic7hQ==", + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", + "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", "dev": true, "requires": { "graceful-fs": "^4.2.4", @@ -10186,81 +7614,123 @@ } }, "es-abstract": { - "version": "1.21.1", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.1.tgz", - "integrity": "sha512-QudMsPOz86xYz/1dG1OuGBKOELjCh99IIWHLzy5znUB6j8xG2yMA7bfTV86VSqKF+Y/H08vQPR+9jyXpuC6hfg==", + "version": "1.23.3", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz", + "integrity": "sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==", "dev": true, "requires": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "es-set-tostringtag": 
"^2.0.1", + "array-buffer-byte-length": "^1.0.1", + "arraybuffer.prototype.slice": "^1.0.3", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "data-view-buffer": "^1.0.1", + "data-view-byte-length": "^1.0.1", + "data-view-byte-offset": "^1.0.0", + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-set-tostringtag": "^2.0.3", "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.1.3", - "get-symbol-description": "^1.0.0", + "function.prototype.name": "^1.1.6", + "get-intrinsic": "^1.2.4", + "get-symbol-description": "^1.0.2", "globalthis": "^1.0.3", "gopd": "^1.0.1", - "has": "^1.0.3", - "has-property-descriptors": "^1.0.0", - "has-proto": "^1.0.1", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", "has-symbols": "^1.0.3", - "internal-slot": "^1.0.4", - "is-array-buffer": "^3.0.1", + "hasown": "^2.0.2", + "internal-slot": "^1.0.7", + "is-array-buffer": "^3.0.4", "is-callable": "^1.2.7", - "is-negative-zero": "^2.0.2", + "is-data-view": "^1.0.1", + "is-negative-zero": "^2.0.3", "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", + "is-shared-array-buffer": "^1.0.3", "is-string": "^1.0.7", - "is-typed-array": "^1.1.10", + "is-typed-array": "^1.1.13", "is-weakref": "^1.0.2", - "object-inspect": "^1.12.2", + "object-inspect": "^1.13.1", "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "safe-regex-test": "^1.0.0", - "string.prototype.trimend": "^1.0.6", - "string.prototype.trimstart": "^1.0.6", - "typed-array-length": "^1.0.4", + "object.assign": "^4.1.5", + "regexp.prototype.flags": "^1.5.2", + "safe-array-concat": "^1.1.2", + "safe-regex-test": "^1.0.3", + "string.prototype.trim": "^1.2.9", + "string.prototype.trimend": "^1.0.8", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.2", + "typed-array-byte-length": "^1.0.1", + "typed-array-byte-offset": 
"^1.0.2", + "typed-array-length": "^1.0.6", "unbox-primitive": "^1.0.2", - "which-typed-array": "^1.1.9" + "which-typed-array": "^1.1.15" } }, - "es-get-iterator": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", - "integrity": "sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==", + "es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", + "get-intrinsic": "^1.2.4" + } + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true + }, + "es-iterator-helpers": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.19.tgz", + "integrity": "sha512-zoMwbCcH5hwUkKJkT8kDIBZSz9I6mVG//+lDCinLCGov4+r7NIy0ld8o03M0cJxl2spVf6ESYVS6/gpIfq1FFw==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", "has-symbols": "^1.0.3", - "is-arguments": "^1.1.1", - "is-map": "^2.0.2", - "is-set": "^2.0.2", - "is-string": "^1.0.7", - "isarray": "^2.0.5", - "stop-iteration-iterator": "^1.0.0" + "internal-slot": "^1.0.7", + "iterator.prototype": "^1.1.2", + "safe-array-concat": "^1.1.2" + } + }, + "es-object-atoms": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz", + "integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==", + "dev": true, + "requires": { + "es-errors": "^1.3.0" } }, "es-set-tostringtag": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", - "integrity": "sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz", + "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==", "dev": true, "requires": { - "get-intrinsic": "^1.1.3", - "has": "^1.0.3", - "has-tostringtag": "^1.0.0" + "get-intrinsic": "^1.2.4", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.1" } }, "es-shim-unscopables": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", - "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz", + "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==", "dev": true, "requires": { - "has": "^1.0.3" + "hasown": "^2.0.0" } }, "es-to-primitive": { @@ -10274,11 +7744,6 @@ "is-symbol": "^1.0.2" } }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" - }, "escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -10329,34 +7794,100 @@ "strip-ansi": "^6.0.1", "strip-json-comments": 
"^3.1.0", "text-table": "^0.2.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "requires": { + "type-fest": "^0.20.2" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true + } } }, "eslint-config-next": { - "version": "13.1.6", - "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-13.1.6.tgz", - "integrity": "sha512-0cg7h5wztg/SoLAlxljZ0ZPUQ7i6QKqRiP4M2+MgTZtxWwNKb2JSwNc18nJ6/kXBI6xYvPraTbQSIhAuVw6czw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.4.tgz", + "integrity": "sha512-Qr0wMgG9m6m4uYy2jrYJmyuNlYZzPRQq5Kvb9IDlYwn+7yq6W6sfMNFgb+9guM1KYwuIo6TIaiFhZJ6SnQ/Efw==", "dev": true, "requires": { - "@next/eslint-plugin-next": "13.1.6", - "@rushstack/eslint-patch": "^1.1.3", - "@typescript-eslint/parser": "^5.42.0", + "@next/eslint-plugin-next": "14.2.4", + "@rushstack/eslint-patch": "^1.3.3", + "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || 7.0.0 - 7.2.0", "eslint-import-resolver-node": "^0.3.6", "eslint-import-resolver-typescript": "^3.5.2", - "eslint-plugin-import": "^2.26.0", - "eslint-plugin-jsx-a11y": "^6.5.1", - "eslint-plugin-react": "^7.31.7", - "eslint-plugin-react-hooks": "^4.5.0" + "eslint-plugin-import": "^2.28.1", + "eslint-plugin-jsx-a11y": "^6.7.1", + "eslint-plugin-react": "^7.33.2", + "eslint-plugin-react-hooks": "^4.5.0 || 5.0.0-canary-7118f5dd7-20230705" } }, "eslint-import-resolver-node": { - "version": "0.3.7", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz", - "integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==", + "version": "0.3.9", + "resolved": 
"https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", "dev": true, "requires": { "debug": "^3.2.7", - "is-core-module": "^2.11.0", - "resolve": "^1.22.1" + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" }, "dependencies": { "debug": { @@ -10371,45 +7902,24 @@ } }, "eslint-import-resolver-typescript": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.5.3.tgz", - "integrity": "sha512-njRcKYBc3isE42LaTcJNVANR3R99H9bAxBDMNDr2W7yq5gYPxbU3MkdhsQukxZ/Xg9C2vcyLlDsbKfRDg0QvCQ==", + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.6.1.tgz", + "integrity": "sha512-xgdptdoi5W3niYeuQxKmzVDTATvLYqhpwmykwsh7f6HIOStGWEIL9iqZgQDF9u9OEzrRwR8no5q2VT+bjAujTg==", "dev": true, "requires": { "debug": "^4.3.4", - "enhanced-resolve": "^5.10.0", - "get-tsconfig": "^4.2.0", - "globby": "^13.1.2", - "is-core-module": "^2.10.0", - "is-glob": "^4.0.3", - "synckit": "^0.8.4" - }, - "dependencies": { - "globby": { - "version": "13.1.3", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.3.tgz", - "integrity": "sha512-8krCNHXvlCgHDpegPzleMq07yMYTO2sXKASmZmquEYWEmCx6J5UTRbp5RwMJkTJGtcQ44YpiUYUiN0b9mzy8Bw==", - "dev": true, - "requires": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" - } - }, - "slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "dev": true - } + "enhanced-resolve": "^5.12.0", + "eslint-module-utils": "^2.7.4", + "fast-glob": "^3.3.1", + "get-tsconfig": "^4.5.0", + "is-core-module": "^2.11.0", + "is-glob": 
"^4.0.3" } }, "eslint-module-utils": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz", - "integrity": "sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.1.tgz", + "integrity": "sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q==", "dev": true, "requires": { "debug": "^3.2.7" @@ -10427,26 +7937,28 @@ } }, "eslint-plugin-import": { - "version": "2.27.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz", - "integrity": "sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==", + "version": "2.29.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.29.1.tgz", + "integrity": "sha512-BbPC0cuExzhiMo4Ff1BTVwHpjjv28C5R+btTOGaCRC7UEz801up0JadwkeSk5Ued6TG34uaczuVuH6qyy5YUxw==", "dev": true, "requires": { - "array-includes": "^3.1.6", - "array.prototype.flat": "^1.3.1", - "array.prototype.flatmap": "^1.3.1", + "array-includes": "^3.1.7", + "array.prototype.findlastindex": "^1.2.3", + "array.prototype.flat": "^1.3.2", + "array.prototype.flatmap": "^1.3.2", "debug": "^3.2.7", "doctrine": "^2.1.0", - "eslint-import-resolver-node": "^0.3.7", - "eslint-module-utils": "^2.7.4", - "has": "^1.0.3", - "is-core-module": "^2.11.0", + "eslint-import-resolver-node": "^0.3.9", + "eslint-module-utils": "^2.8.0", + "hasown": "^2.0.0", + "is-core-module": "^2.13.1", "is-glob": "^4.0.3", "minimatch": "^3.1.2", - "object.values": "^1.1.6", - "resolve": "^1.22.1", - "semver": "^6.3.0", - "tsconfig-paths": "^3.14.1" + "object.fromentries": "^2.0.7", + "object.groupby": "^1.0.1", + "object.values": "^1.1.7", + "semver": "^6.3.1", + "tsconfig-paths": "^3.15.0" }, "dependencies": { "debug": { @@ -10476,58 
+7988,53 @@ } }, "eslint-plugin-jsx-a11y": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.7.1.tgz", - "integrity": "sha512-63Bog4iIethyo8smBklORknVjB0T2dwB8Mr/hIC+fBS0uyHdYYpzM/Ed+YC8VxTjlXHEWFOdmgwcDn1U2L9VCA==", + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.8.0.tgz", + "integrity": "sha512-Hdh937BS3KdwwbBaKd5+PLCOmYY6U4f2h9Z2ktwtNKvIdIEu137rjYbcb9ApSbVJfWxANNuiKTD/9tOKjK9qOA==", "dev": true, "requires": { - "@babel/runtime": "^7.20.7", - "aria-query": "^5.1.3", - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "ast-types-flow": "^0.0.7", - "axe-core": "^4.6.2", - "axobject-query": "^3.1.1", + "@babel/runtime": "^7.23.2", + "aria-query": "^5.3.0", + "array-includes": "^3.1.7", + "array.prototype.flatmap": "^1.3.2", + "ast-types-flow": "^0.0.8", + "axe-core": "=4.7.0", + "axobject-query": "^3.2.1", "damerau-levenshtein": "^1.0.8", "emoji-regex": "^9.2.2", - "has": "^1.0.3", - "jsx-ast-utils": "^3.3.3", - "language-tags": "=1.0.5", + "es-iterator-helpers": "^1.0.15", + "hasown": "^2.0.0", + "jsx-ast-utils": "^3.3.5", + "language-tags": "^1.0.9", "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true - } + "object.entries": "^1.1.7", + "object.fromentries": "^2.0.7" } }, "eslint-plugin-react": { - "version": "7.32.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.32.2.tgz", - "integrity": "sha512-t2fBMa+XzonrrNkyVirzKlvn5RXzzPwRHtMvLAtVZrt8oxgnTQaYbU6SXTOO1mwQgp1y5+toMSKInnzGr0Knqg==", + "version": "7.34.2", + "resolved": 
"https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.34.2.tgz", + "integrity": "sha512-2HCmrU+/JNigDN6tg55cRDKCQWicYAPB38JGSFDQt95jDm8rrvSUo7YPkOIm5l6ts1j1zCvysNcasvfTMQzUOw==", "dev": true, "requires": { - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "array.prototype.tosorted": "^1.1.1", + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.2", + "array.prototype.toreversed": "^1.1.2", + "array.prototype.tosorted": "^1.1.3", "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.19", "estraverse": "^5.3.0", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "object.hasown": "^1.1.2", - "object.values": "^1.1.6", + "object.entries": "^1.1.8", + "object.fromentries": "^2.0.8", + "object.hasown": "^1.1.4", + "object.values": "^1.2.0", "prop-types": "^15.8.1", - "resolve": "^2.0.0-next.4", - "semver": "^6.3.0", - "string.prototype.matchall": "^4.0.8" + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.11" }, "dependencies": { "doctrine": { @@ -10540,12 +8047,12 @@ } }, "resolve": { - "version": "2.0.0-next.4", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.4.tgz", - "integrity": "sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==", + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", "dev": true, "requires": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" } @@ -10559,16 +8066,16 @@ } }, "eslint-plugin-react-hooks": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", - 
"integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", + "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", "dev": true, "requires": {} }, "eslint-scope": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", - "integrity": "sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, "requires": { "esrecurse": "^4.3.0", @@ -10593,26 +8100,26 @@ } }, "eslint-visitor-keys": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz", - "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true }, "espree": { - "version": "9.4.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.4.1.tgz", - "integrity": "sha512-XwctdmTO6SIvCzd9810yyNzIrOrqNYV9Koizx4C/mRhf9uq0o4yHoCEU/670pOxOL/MSraektvSAji79kX90Vg==", + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, "requires": { - "acorn": "^8.8.0", + "acorn": "^8.9.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.3.0" + "eslint-visitor-keys": 
"^3.4.1" } }, "esquery": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.2.tgz", - "integrity": "sha512-JVSoLdTlTDkmjFmab7H/9SL9qGSyjElT3myyKp7krqjVFQCDLmj1QFaCLRFBszBKI0XVZaiiXvuPIX3ZwHe1Ng==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", "dev": true, "requires": { "estraverse": "^5.1.0" @@ -10636,7 +8143,8 @@ "esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true }, "fast-deep-equal": { "version": "3.1.3", @@ -10645,9 +8153,9 @@ "dev": true }, "fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dev": true, "requires": { "@nodelib/fs.stat": "^2.0.2", @@ -10681,9 +8189,9 @@ "dev": true }, "fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", "dev": true, "requires": { "reusify": "^1.0.4" @@ -10699,10 +8207,10 @@ } }, "fill-range": { - 
"version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "devOptional": true, + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, "requires": { "to-regex-range": "^5.0.1" } @@ -10723,19 +8231,20 @@ } }, "flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, "requires": { - "flatted": "^3.1.0", + "flatted": "^3.2.9", + "keyv": "^4.5.3", "rimraf": "^3.0.2" } }, "flatted": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", - "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", "dev": true }, "for-each": { @@ -10747,6 +8256,16 @@ "is-callable": "^1.1.3" } }, + "foreground-child": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.2.0.tgz", + "integrity": "sha512-CrWQNaEl1/6WeZoarcM9LHupTo3RpZO2Pdk1vktwzPiQTsJnAKJmm3TACKeG5UZbWDfaH2AbvYxzP96y0MT7fA==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + } + }, "formik": { "version": "2.2.9", "resolved": 
"https://registry.npmjs.org/formik/-/formik-2.2.9.tgz", @@ -10761,37 +8280,27 @@ "tslib": "^1.10.0" } }, - "fs-readdir-recursive": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz", - "integrity": "sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==" - }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" - }, - "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true }, "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==" }, "function.prototype.name": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", + "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", "dev": true, "requires": { "call-bind": "^1.0.2", 
- "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" } }, "functions-have-names": { @@ -10800,42 +8309,43 @@ "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", "dev": true }, - "gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==" - }, "get-browser-rtc": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/get-browser-rtc/-/get-browser-rtc-1.1.0.tgz", "integrity": "sha512-MghbMJ61EJrRsDe7w1Bvqt3ZsBuqhce5nrn/XAwgwOXhcsz53/ltdxOse1h/8eKXj5slzxdsz56g5rzOFSGwfQ==" }, "get-intrinsic": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dev": true, "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" } }, "get-symbol-description": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", - "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz", + "integrity": 
"sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" + "call-bind": "^1.0.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4" } }, "get-tsconfig": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.4.0.tgz", - "integrity": "sha512-0Gdjo/9+FzsYhXCEFueo2aY1z1tpXrxWZzP7k8ul9qt1U5o8rYJwTJYmaeHdrVosYIVYkOy2iwCJ9FdpocJhPQ==", - "dev": true + "version": "4.7.5", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.5.tgz", + "integrity": "sha512-ZCuZCnlqNzjb4QprAzXKdpp/gh6KTxSJuw3IBsPnV/7fV4NxC9ckB+vPTt8w7fJA0TaSD7c55BR47JD6MEDyDw==", + "dev": true, + "requires": { + "resolve-pkg-maps": "^1.0.0" + } }, "glob": { "version": "7.1.7", @@ -10860,35 +8370,21 @@ "is-glob": "^4.0.3" } }, - "glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" - }, "globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", - "dev": true, - "requires": { - "type-fest": "^0.20.2" - } + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==" }, "globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": 
"sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", "dev": true, "requires": { - "define-properties": "^1.1.3" + "define-properties": "^1.2.1", + "gopd": "^1.0.1" } }, - "globalyzer": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/globalyzer/-/globalyzer-0.1.0.tgz", - "integrity": "sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q==", - "dev": true - }, "globby": { "version": "11.1.0", "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", @@ -10903,12 +8399,6 @@ "slash": "^3.0.0" } }, - "globrex": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/globrex/-/globrex-0.1.2.tgz", - "integrity": "sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==", - "dev": true - }, "gopd": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", @@ -10919,9 +8409,9 @@ } }, "graceful-fs": { - "version": "4.2.10", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", - "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, "grapheme-splitter": { "version": "1.0.4", @@ -10929,14 +8419,6 @@ "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", "dev": true }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "requires": { - "function-bind": "^1.1.1" - } - }, "has-bigints": { "version": "1.0.2", "resolved": 
"https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", @@ -10944,24 +8426,23 @@ "dev": true }, "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" }, "has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dev": true, "requires": { - "get-intrinsic": "^1.1.1" + "es-define-property": "^1.0.0" } }, "has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", "dev": true }, "has-symbols": { @@ -10971,12 +8452,20 @@ "dev": true }, "has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "dev": true, "requires": { - "has-symbols": "^1.0.2" + "has-symbols": "^1.0.3" + } + }, + "hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "requires": { + "function-bind": "^1.1.2" } }, "hoist-non-react-statics": { @@ -11012,9 +8501,9 @@ "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" }, "ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "dev": true }, "import-fresh": { @@ -11036,6 +8525,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, "requires": { "once": "^1.3.0", "wrappy": "1" @@ -11047,35 +8537,24 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "internal-slot": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz", - "integrity": "sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz", + "integrity": 
"sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==", "dev": true, "requires": { - "get-intrinsic": "^1.2.0", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "hasown": "^2.0.0", "side-channel": "^1.0.4" } }, - "is-arguments": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, "is-array-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.1.tgz", - "integrity": "sha512-ASfLknmY8Xa2XtB4wmbz13Wu202baeA18cJBCeCy0wXUHZF0IPyVEXqKEcd+t2fNSLLL1vC6k7lxZEojNbISXQ==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz", + "integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==", "dev": true, "requires": { "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "is-typed-array": "^1.1.10" + "get-intrinsic": "^1.2.1" } }, "is-arrayish": { @@ -11083,6 +8562,15 @@ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" }, + "is-async-function": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", + "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } + }, "is-bigint": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", @@ -11092,15 +8580,6 @@ "has-bigints": "^1.0.1" } }, - "is-binary-path": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "optional": true, - "requires": { - "binary-extensions": "^2.0.0" - } - }, "is-boolean-object": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", @@ -11118,11 +8597,20 @@ "dev": true }, "is-core-module": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz", - "integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", "requires": { - "has": "^1.0.3" + "hasown": "^2.0.0" + } + }, + "is-data-view": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz", + "integrity": "sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==", + "dev": true, + "requires": { + "is-typed-array": "^1.1.13" } }, "is-date-object": { @@ -11134,44 +8622,62 @@ "has-tostringtag": "^1.0.0" } }, - "is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "dev": true - }, "is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "devOptional": true + "dev": true + }, + "is-finalizationregistry": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz", + "integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==", + "dev": true, + "requires": { + "call-bind": "^1.0.2" + } + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true + }, + "is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dev": true, + "requires": { + "has-tostringtag": "^1.0.0" + } }, "is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "devOptional": true, + "dev": true, "requires": { "is-extglob": "^2.1.1" } }, "is-map": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz", - "integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", "dev": true }, "is-negative-zero": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", - "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + 
"integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", "dev": true }, "is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "devOptional": true + "dev": true }, "is-number-object": { "version": "1.0.7", @@ -11199,18 +8705,18 @@ } }, "is-set": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz", - "integrity": "sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", "dev": true }, "is-shared-array-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", - "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz", + "integrity": "sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==", "dev": true, "requires": { - "call-bind": "^1.0.2" + "call-bind": "^1.0.7" } }, "is-string": { @@ -11232,22 +8738,18 @@ } }, "is-typed-array": { - "version": "1.1.10", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz", + "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", 
"dev": true, "requires": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0" + "which-typed-array": "^1.1.14" } }, "is-weakmap": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz", - "integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", "dev": true }, "is-weakref": { @@ -11260,29 +8762,19 @@ } }, "is-weakset": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz", - "integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.3.tgz", + "integrity": "sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" - } - }, - "is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dev": true, - "requires": { - "is-docker": "^2.0.0" + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4" } }, "isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", - "dev": true + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": 
"sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" }, "isexe": { "version": "2.0.0", @@ -11290,10 +8782,33 @@ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, + "iterator.prototype": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.2.tgz", + "integrity": "sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==", + "dev": true, + "requires": { + "define-properties": "^1.2.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.4", + "set-function-name": "^2.0.1" + } + }, + "jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dev": true, + "requires": { + "@isaacs/cliui": "^8.0.2", + "@pkgjs/parseargs": "^0.11.0" + } + }, "js-sdsl": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.3.0.tgz", - "integrity": "sha512-mifzlm2+5nZ+lEcLJMoBK0/IH/bDg8XnJfd/Wq6IP+xoCjLZsTOnV2QpxlVbX9bMnkl5PdEjNtBJ9Cj1NjifhQ==", + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.4.2.tgz", + "integrity": "sha512-dwXFwByc/ajSV6m5bcKAPwe4yDDF6D614pxmIi5odytzxRlwqF6nwoiCek80Ixc7Cvma5awClxrzFtxCQvcM8w==", "dev": true }, "js-tokens": { @@ -11315,6 +8830,12 @@ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==" }, + "json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, 
"json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", @@ -11342,28 +8863,39 @@ } }, "jsx-ast-utils": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz", - "integrity": "sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==", + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", "dev": true, "requires": { - "array-includes": "^3.1.5", - "object.assign": "^4.1.3" + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + } + }, + "keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "requires": { + "json-buffer": "3.0.1" } }, "language-subtag-registry": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz", - "integrity": "sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w==", + "version": "0.3.23", + "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", + "integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", "dev": true }, "language-tags": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.5.tgz", - "integrity": "sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==", + "version": "1.0.9", + "resolved": 
"https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", + "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", "dev": true, "requires": { - "language-subtag-registry": "~0.3.2" + "language-subtag-registry": "^0.3.20" } }, "levn": { @@ -11400,11 +8932,6 @@ "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" }, - "lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" - }, "lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -11420,29 +8947,10 @@ } }, "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "dependencies": { - "semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==" - } - } + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz", + "integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==", + "dev": true }, "merge2": { "version": 
"1.4.1", @@ -11451,12 +8959,12 @@ "dev": true }, "micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz", + "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==", "dev": true, "requires": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" } }, @@ -11464,6 +8972,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, "requires": { "brace-expansion": "^1.1.7" } @@ -11473,6 +8982,12 @@ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==" }, + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true + }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -11499,39 +9014,28 @@ "dev": true }, "next": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/next/-/next-13.5.6.tgz", - "integrity": "sha512-Y2wTcTbO4WwEsVb4A8VSnOsG1I9ok+h74q0ZdxkwM3EODqrs4pasq7O0iUxbcS9VtWMicG7f3+HAj0r1+NtKSw==", + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.4.tgz", + "integrity": "sha512-R8/V7vugY+822rsQGQCjoLhMuC9oFj9SOi4Cl4b2wjDrseD0LRZ10W7R6Czo4w9ZznVSshKjuIomsRjvm9EKJQ==", "requires": { - "@next/env": "13.5.6", - "@next/swc-darwin-arm64": "13.5.6", - "@next/swc-darwin-x64": "13.5.6", - 
"@next/swc-linux-arm64-gnu": "13.5.6", - "@next/swc-linux-arm64-musl": "13.5.6", - "@next/swc-linux-x64-gnu": "13.5.6", - "@next/swc-linux-x64-musl": "13.5.6", - "@next/swc-win32-arm64-msvc": "13.5.6", - "@next/swc-win32-ia32-msvc": "13.5.6", - "@next/swc-win32-x64-msvc": "13.5.6", - "@swc/helpers": "0.5.2", + "@next/env": "14.2.4", + "@next/swc-darwin-arm64": "14.2.4", + "@next/swc-darwin-x64": "14.2.4", + "@next/swc-linux-arm64-gnu": "14.2.4", + "@next/swc-linux-arm64-musl": "14.2.4", + "@next/swc-linux-x64-gnu": "14.2.4", + "@next/swc-linux-x64-musl": "14.2.4", + "@next/swc-win32-arm64-msvc": "14.2.4", + "@next/swc-win32-ia32-msvc": "14.2.4", + "@next/swc-win32-x64-msvc": "14.2.4", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", - "caniuse-lite": "^1.0.30001406", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", "postcss": "8.4.31", - "styled-jsx": "5.1.1", - "watchpack": "2.4.0" + "styled-jsx": "5.1.1" } }, - "node-releases": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", - "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==" - }, - "normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "optional": true - }, "nprogress": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", @@ -11543,21 +9047,11 @@ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" }, "object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "version": "1.13.1", + "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", "dev": true }, - "object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, "object-keys": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", @@ -11565,83 +9059,86 @@ "dev": true }, "object.assign": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", - "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", "has-symbols": "^1.0.3", "object-keys": "^1.1.1" } }, "object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz", + "integrity": "sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": 
"^1.0.0" } }, "object.fromentries": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + } + }, + "object.groupby": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz", + "integrity": "sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2" } }, "object.hasown": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.2.tgz", - "integrity": "sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.4.tgz", + "integrity": "sha512-FZ9LZt9/RHzGySlBARE3VF+gE26TxR38SdmqOqliuTnl9wrKulaQs+4dee1V+Io8VfxqzAfHu6YuRgUy8OHoTg==", "dev": true, "requires": { - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" } }, "object.values": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", + "version": 
"1.2.0", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.0.tgz", + "integrity": "sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" } }, "once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, "requires": { "wrappy": "1" } }, - "open": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", - "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", - "dev": true, - "requires": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - } - }, "optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, "requires": { "deep-is": "^0.1.3", @@ -11649,7 +9146,7 @@ "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", - "word-wrap": "^1.2.3" + "word-wrap": "^1.2.5" } }, "p-limit": { @@ -11698,7 +9195,8 @@ "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==" + "integrity": 
"sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true }, "path-key": { "version": "3.1.1", @@ -11711,26 +9209,37 @@ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, + "path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "requires": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + } + }, "path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==" }, "picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" }, "picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "devOptional": true + "dev": true }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==" + "possible-typed-array-names": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", + "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "dev": true }, "postcss": { "version": "8.4.31", @@ -11776,14 +9285,14 @@ } }, "property-expr": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.5.tgz", - "integrity": "sha512-IJUkICM5dP5znhCckHSv30Q4b5/JA5enCtkRHYaOVOAocnH/1BQEYTC5NMfT3AVl/iXKdr3aqQbQn9DxyWknwA==" + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.6.tgz", + "integrity": "sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA==" }, "punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true }, "queue-microtask": { @@ -11800,9 +9309,9 @@ } }, "react": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", - "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", "requires": { "loose-envify": "^1.1.0" } @@ -11816,12 +9325,12 @@ } }, "react-dom": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", - "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "version": "18.3.1", + 
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", "requires": { "loose-envify": "^1.1.0", - "scheduler": "^0.23.0" + "scheduler": "^0.23.2" } }, "react-fast-compare": { @@ -11830,9 +9339,9 @@ "integrity": "sha512-suNP+J1VU1MWFKcyt7RtjiSWUjvidmQSlqu+eHslq+342xCbGTYmC0mEhPCOHxlW0CywylOC1u2DFAT+bv4dBw==" }, "react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" }, "react-transition-group": { "version": "4.4.5", @@ -11854,59 +9363,38 @@ "inherits": "~2.0.1", "isarray": "0.0.1", "string_decoder": "~0.10.x" - }, - "dependencies": { - "isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" - } } }, - "readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "optional": true, + "reflect.getprototypeof": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.6.tgz", + "integrity": "sha512-fmfw4XgoDke3kdI6h4xcUz1dG8uaiv5q9gcEwLS4Pnth2kxT+GZ7YehS1JTMGBQmtV7Y4GFGbs2re2NqhdozUg==", + "dev": true, "requires": { - "picomatch": "^2.2.1" - } - }, - "regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": 
"sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" - }, - "regenerate-unicode-properties": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", - "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", - "requires": { - "regenerate": "^1.4.2" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.1", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" } }, "regenerator-runtime": { - "version": "0.13.11", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", - "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" - }, - "regenerator-transform": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", - "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", - "requires": { - "@babel/runtime": "^7.8.4" - } + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" }, "regexp.prototype.flags": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz", - "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==", + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz", + "integrity": "sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==", "dev": true, "requires": { - 
"call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "functions-have-names": "^1.2.2" + "call-bind": "^1.0.6", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "set-function-name": "^2.0.1" } }, "regexpp": { @@ -11915,40 +9403,12 @@ "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", "dev": true }, - "regexpu-core": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", - "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", - "requires": { - "@babel/regjsgen": "^0.8.0", - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsparser": "^0.9.1", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.1.0" - } - }, - "regjsparser": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", - "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", - "requires": { - "jsesc": "~0.5.0" - }, - "dependencies": { - "jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==" - } - } - }, "resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", "requires": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" } @@ -11958,6 +9418,12 @@ 
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==" }, + "resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true + }, "reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -11988,37 +9454,80 @@ "queue-microtask": "^1.2.2" } }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "safe-regex-test": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz", - "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==", + "safe-array-concat": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz", + "integrity": "sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "dependencies": { + "isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + } + } + }, + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": 
"sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" + }, + "safe-regex-test": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz", + "integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==", + "dev": true, + "requires": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", "is-regex": "^1.1.4" } }, "scheduler": { - "version": "0.23.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", - "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", "requires": { "loose-envify": "^1.1.0" } }, "semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", + "integrity": "sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==", + "dev": true + }, + "set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", "dev": true, "requires": { - "lru-cache": "^6.0.0" + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + } + }, + "set-function-name": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" } }, "shallowequal": { @@ -12042,16 +9551,23 @@ "dev": true }, "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dev": true, "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" } }, + "signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true + }, "simple-peer": { "version": "9.11.1", "resolved": "https://registry.npmjs.org/simple-peer/-/simple-peer-9.11.1.tgz", @@ -12076,11 +9592,6 @@ "util-deprecate": "^1.0.1" } }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, "string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -12092,20 +9603,22 @@ } }, "simplebar-core": { - "version": "1.2.1", - "resolved": 
"https://registry.npmjs.org/simplebar-core/-/simplebar-core-1.2.1.tgz", - "integrity": "sha512-dgX6qDOSDM3+crxFstIKOlxyxnD3NYGRPm7CqmAtnNfFLVeadrydym5eNpduIO7aDFU/rayS6hEdrcxO0WBqnQ==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/simplebar-core/-/simplebar-core-1.2.5.tgz", + "integrity": "sha512-33AVCYXS8yavWId0GbE4TG1cYELsYybpCKWHJYuWEY/j6nccgz6zQdJ7nCqOpIGo7HgPPbkSSSIlJhi43fHP6A==", "requires": { + "@types/lodash-es": "^4.17.6", "can-use-dom": "^0.1.0", + "lodash": "^4.17.21", "lodash-es": "^4.17.21" } }, "simplebar-react": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/simplebar-react/-/simplebar-react-3.2.1.tgz", - "integrity": "sha512-viHQs/34ZQk956b88Kl7+VckWO8DKOhg2Hkl8kjPv2q16w+/nnOJOGhZKrpj/egMGqDpQywou2AhdRkAM0oddA==", + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/simplebar-react/-/simplebar-react-3.2.5.tgz", + "integrity": "sha512-ZstHCBF1Is2Lj+Un8NUYSHVCmn8ufi25ylP9UH2bDnASa+V+M+6/thGhUZOZ7YNpFFHTNgVIID3FHdwRqNuqZA==", "requires": { - "simplebar-core": "^1.2.1" + "simplebar-core": "^1.2.5" } }, "slash": { @@ -12115,13 +9628,13 @@ "dev": true }, "socket.io-client": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-4.6.2.tgz", - "integrity": "sha512-OwWrMbbA8wSqhBAR0yoPK6EdQLERQAYjXb3A0zLpgxfM1ZGLKoxHx8gVmCHA6pcclRX5oA/zvQf7bghAS11jRA==", + "version": "4.7.5", + "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-4.7.5.tgz", + "integrity": "sha512-sJ/tqHOCe7Z50JCBCXrsY3I2k03iOiUe+tj1OmKeD2lXPiGH/RUCdTZFoqVyN7l1MnpIzPrGtLcijffmeouNlQ==", "requires": { "@socket.io/component-emitter": "~3.1.0", "debug": "~4.3.2", - "engine.io-client": "~6.4.0", + "engine.io-client": "~6.5.2", "socket.io-parser": "~4.2.4" } }, @@ -12140,18 +9653,9 @@ "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==" }, "source-map-js": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==" - }, - "stop-iteration-iterator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz", - "integrity": "sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==", - "dev": true, - "requires": { - "internal-slot": "^1.0.4" - } + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==" }, "streamsearch": { "version": "1.1.0", @@ -12163,42 +9667,105 @@ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==" }, - "string.prototype.matchall": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.8.tgz", - "integrity": "sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==", + "string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": 
"sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true + }, + "strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "string-width-cjs": { + "version": "npm:string-width@4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "dependencies": { + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + } + } + }, + "string.prototype.matchall": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.11.tgz", + "integrity": "sha512-NUdh0aDavY2og7IbBPenWqR9exH+E26Sv8e0/eTe1tltDGZL+GtBkDAnnyBtmekfK6/Dq3MkcGtzXFEd1LQrtg==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4" + "internal-slot": "^1.0.7", + "regexp.prototype.flags": "^1.5.2", + "set-function-name": "^2.0.2", + "side-channel": "^1.0.6" + } + }, + "string.prototype.trim": { + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz", + 
"integrity": "sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.0", + "es-object-atoms": "^1.0.0" } }, "string.prototype.trimend": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz", + "integrity": "sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" } }, "string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", "dev": true, "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" } }, "strip-ansi": { @@ -12210,6 +9777,15 @@ "ansi-regex": "^5.0.1" } }, + "strip-ansi-cjs": { + "version": "npm:strip-ansi@6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": 
"sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + }, "strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", @@ -12223,37 +9799,40 @@ "dev": true }, "styled-components": { - "version": "6.0.0-rc.3", - "resolved": "https://registry.npmjs.org/styled-components/-/styled-components-6.0.0-rc.3.tgz", - "integrity": "sha512-5FbCTxynopck99GRwM5Ey0+VRp8pkQq69TwGOJJeYtR7gPvwGjNx8yBPLN7/dfxwwvn9ymOZYB19eQkv2k70wQ==", + "version": "6.1.11", + "resolved": "https://registry.npmjs.org/styled-components/-/styled-components-6.1.11.tgz", + "integrity": "sha512-Ui0jXPzbp1phYij90h12ksljKGqF8ncGx+pjrNPsSPhbUUjWT2tD1FwGo2LF6USCnbrsIhNngDfodhxbegfEOA==", "requires": { - "@babel/cli": "^7.21.0", - "@babel/core": "^7.21.0", - "@babel/helper-module-imports": "^7.18.6", - "@babel/plugin-external-helpers": "^7.18.6", - "@babel/plugin-proposal-class-properties": "^7.18.6", - "@babel/plugin-proposal-object-rest-spread": "^7.20.7", - "@babel/preset-env": "^7.20.2", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.21.0", - "@babel/traverse": "^7.21.2", - "@emotion/unitless": "^0.8.0", - "css-to-react-native": "^3.2.0", - "postcss": "^8.4.23", - "shallowequal": "^1.1.0", - "stylis": "^4.2.0", - "tslib": "^2.5.0" + "@emotion/is-prop-valid": "1.2.2", + "@emotion/unitless": "0.8.1", + "@types/stylis": "4.2.5", + "css-to-react-native": "3.2.0", + "csstype": "3.1.3", + "postcss": "8.4.38", + "shallowequal": "1.1.0", + "stylis": "4.3.2", + "tslib": "2.6.2" }, "dependencies": { + "postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "requires": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + } + }, "stylis": { - "version": 
"4.2.0", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", - "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==" + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.2.tgz", + "integrity": "sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==" }, "tslib": { - "version": "2.5.3", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.3.tgz", - "integrity": "sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w==" + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" } } }, @@ -12271,12 +9850,11 @@ "integrity": "sha512-GP6WDNWf+o403jrEp9c5jibKavrtLW+/qYGhFxFrG8maXhwTBI7gLLhiBb0o7uFccWN+EOS9aMO6cGHWAO07OA==" }, "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "requires": { - "has-flag": "^4.0.0" + "has-flag": "^3.0.0" } }, "supports-preserve-symlinks-flag": { @@ -12348,24 +9926,6 @@ "svg.js": "^2.6.5" } }, - "synckit": { - "version": "0.8.5", - "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.8.5.tgz", - "integrity": "sha512-L1dapNV6vu2s/4Sputv8xGsCdAVlb5nRDMFU/E27D44l5U6cw1g0dGd45uLc+OXjNMmF4ntiMdCimzcjFKQI8Q==", - "dev": true, - "requires": { - "@pkgr/utils": "^2.3.1", - "tslib": "^2.5.0" - }, - "dependencies": { - "tslib": { - "version": "2.5.0", - "resolved": 
"https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==", - "dev": true - } - } - }, "tapable": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", @@ -12397,16 +9957,6 @@ "resolved": "https://registry.npmjs.org/tiny-case/-/tiny-case-1.0.3.tgz", "integrity": "sha512-Eet/eeMhkO6TX8mnUteS9zgPbUMQa4I6Kkp5ORiBD5476/m+PIRiumP5tmh5ioJpH7k51Kehawy2UDfsnxxY8Q==" }, - "tiny-glob": { - "version": "0.2.9", - "resolved": "https://registry.npmjs.org/tiny-glob/-/tiny-glob-0.2.9.tgz", - "integrity": "sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==", - "dev": true, - "requires": { - "globalyzer": "0.1.0", - "globrex": "^0.1.2" - } - }, "tiny-warning": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", @@ -12421,7 +9971,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "devOptional": true, + "dev": true, "requires": { "is-number": "^7.0.0" } @@ -12432,13 +9982,13 @@ "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg==" }, "tsconfig-paths": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz", - "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==", + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", "dev": true, "requires": { "@types/json5": "^0.0.29", - "json5": "^1.0.1", + "json5": "^1.0.2", "minimist": "^1.2.6", "strip-bom": 
"^3.0.0" } @@ -12467,26 +10017,66 @@ } }, "type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==" }, - "typed-array-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", - "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==", + "typed-array-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz", + "integrity": "sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==", "dev": true, "requires": { - "call-bind": "^1.0.2", + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.13" + } + }, + "typed-array-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz", + "integrity": "sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", "for-each": "^0.3.3", - "is-typed-array": "^1.1.9" + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + } + }, + "typed-array-byte-offset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz", + "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==", + "dev": true, + "requires": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": 
"^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + } + }, + "typed-array-length": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.6.tgz", + "integrity": "sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==", + "dev": true, + "requires": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0" } }, "typescript": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", - "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", + "version": "5.4.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", + "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", "dev": true, "peer": true }, @@ -12502,39 +10092,6 @@ "which-boxed-primitive": "^1.0.2" } }, - "unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==" - }, - "unicode-match-property-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", - "requires": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - } - }, - "unicode-match-property-value-ecmascript": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", - "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==" - }, - "unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", - "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==" - }, - "update-browserslist-db": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", - "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", - "requires": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" - } - }, "uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -12549,15 +10106,6 @@ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, - "watchpack": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", - "requires": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - } - }, "which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -12580,42 +10128,158 @@ "is-symbol": "^1.0.3" } }, - "which-collection": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz", - "integrity": "sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==", + "which-builtin-type": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.3.tgz", + "integrity": "sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==", "dev": true, "requires": { - "is-map": "^2.0.1", - "is-set": "^2.0.1", - "is-weakmap": "^2.0.1", - "is-weakset": "^2.0.1" + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "dependencies": { + "isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + } + } + }, + "which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "requires": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" } }, "which-typed-array": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.15.tgz", + "integrity": "sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==", "dev": true, "requires": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", 
"for-each": "^0.3.3", "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0", - "is-typed-array": "^1.1.10" + "has-tostringtag": "^1.0.2" } }, "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", "dev": true }, + "wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "requires": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true + }, + "ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true + }, + "strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "requires": { + "ansi-regex": "^6.0.1" + } + } + } + }, + "wrap-ansi-cjs": { + "version": "npm:wrap-ansi@7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + } + } + }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + "integrity": 
"sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true }, "ws": { "version": "8.11.0", @@ -12643,12 +10307,6 @@ } } }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, "yaml": { "version": "1.10.2", "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", @@ -12669,13 +10327,6 @@ "tiny-case": "^1.0.3", "toposort": "^2.0.2", "type-fest": "^2.19.0" - }, - "dependencies": { - "type-fest": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==" - } } } } diff --git a/frontend/package.json b/frontend/package.json index 5b6e148..1003220 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -26,12 +26,12 @@ "apexcharts": "3.37.0", "date-fns": "2.29.3", "formik": "2.2.9", - "next": "13.5.6", + "next": "^14.2.4", "nprogress": "0.2.0", "prop-types": "15.8.1", - "react": "18.2.0", + "react": "^18.3.1", "react-apexcharts": "1.4.0", - "react-dom": "18.2.0", + "react-dom": "^18.3.1", "simple-peer": "^9.11.1", "simplebar-react": "^3.2.1", "socket.io-client": "^4.6.2", @@ -45,6 +45,6 @@ "@types/react": "18.0.28", "@types/react-dom": "18.0.11", "eslint": "8.34.0", - "eslint-config-next": "13.1.6" + "eslint-config-next": "^14.2.4" } } From 0e861742222972ad1bee51bd11805fbcaf66fa45 Mon Sep 17 00:00:00 2001 From: leandrofars Date: Thu, 13 Jun 2024 12:34:18 -0300 Subject: [PATCH 20/52] chore(controller): update golang dependencies --- backend/services/controller/go.mod | 10 +- backend/services/controller/go.sum | 19 +- .../github.com/golang-jwt/jwt/v5/.gitignore | 4 - .../github.com/golang-jwt/jwt/v5/LICENSE | 9 - .../golang-jwt/jwt/v5/MIGRATION_GUIDE.md | 195 - 
.../github.com/golang-jwt/jwt/v5/README.md | 167 - .../github.com/golang-jwt/jwt/v5/SECURITY.md | 19 - .../golang-jwt/jwt/v5/VERSION_HISTORY.md | 137 - .../github.com/golang-jwt/jwt/v5/claims.go | 16 - .../github.com/golang-jwt/jwt/v5/doc.go | 4 - .../github.com/golang-jwt/jwt/v5/ecdsa.go | 134 - .../golang-jwt/jwt/v5/ecdsa_utils.go | 69 - .../github.com/golang-jwt/jwt/v5/ed25519.go | 79 - .../golang-jwt/jwt/v5/ed25519_utils.go | 64 - .../github.com/golang-jwt/jwt/v5/errors.go | 49 - .../golang-jwt/jwt/v5/errors_go1_20.go | 47 - .../golang-jwt/jwt/v5/errors_go_other.go | 78 - .../github.com/golang-jwt/jwt/v5/hmac.go | 104 - .../golang-jwt/jwt/v5/map_claims.go | 109 - .../github.com/golang-jwt/jwt/v5/none.go | 50 - .../github.com/golang-jwt/jwt/v5/parser.go | 238 - .../golang-jwt/jwt/v5/parser_option.go | 128 - .../golang-jwt/jwt/v5/registered_claims.go | 63 - .../github.com/golang-jwt/jwt/v5/rsa.go | 93 - .../github.com/golang-jwt/jwt/v5/rsa_pss.go | 135 - .../github.com/golang-jwt/jwt/v5/rsa_utils.go | 107 - .../golang-jwt/jwt/v5/signing_method.go | 49 - .../golang-jwt/jwt/v5/staticcheck.conf | 1 - .../github.com/golang-jwt/jwt/v5/token.go | 100 - .../golang-jwt/jwt/v5/token_option.go | 5 - .../github.com/golang-jwt/jwt/v5/types.go | 149 - .../github.com/golang-jwt/jwt/v5/validator.go | 316 - .../github.com/golang/snappy/.gitignore | 16 - .../vendor/github.com/golang/snappy/AUTHORS | 15 - .../github.com/golang/snappy/CONTRIBUTORS | 37 - .../vendor/github.com/golang/snappy/LICENSE | 27 - .../vendor/github.com/golang/snappy/README | 107 - .../vendor/github.com/golang/snappy/decode.go | 237 - .../github.com/golang/snappy/decode_amd64.go | 14 - .../github.com/golang/snappy/decode_amd64.s | 490 - .../github.com/golang/snappy/decode_other.go | 101 - .../vendor/github.com/golang/snappy/encode.go | 285 - .../github.com/golang/snappy/encode_amd64.go | 29 - .../github.com/golang/snappy/encode_amd64.s | 730 -- .../github.com/golang/snappy/encode_other.go | 238 - 
.../vendor/github.com/golang/snappy/snappy.go | 98 - .../vendor/github.com/google/uuid/.travis.yml | 9 - .../github.com/google/uuid/CONTRIBUTING.md | 10 - .../github.com/google/uuid/CONTRIBUTORS | 9 - .../vendor/github.com/google/uuid/LICENSE | 27 - .../vendor/github.com/google/uuid/README.md | 19 - .../vendor/github.com/google/uuid/dce.go | 80 - .../vendor/github.com/google/uuid/doc.go | 12 - .../vendor/github.com/google/uuid/hash.go | 53 - .../vendor/github.com/google/uuid/marshal.go | 38 - .../vendor/github.com/google/uuid/node.go | 90 - .../vendor/github.com/google/uuid/node_js.go | 12 - .../vendor/github.com/google/uuid/node_net.go | 33 - .../vendor/github.com/google/uuid/null.go | 118 - .../vendor/github.com/google/uuid/sql.go | 59 - .../vendor/github.com/google/uuid/time.go | 123 - .../vendor/github.com/google/uuid/util.go | 43 - .../vendor/github.com/google/uuid/uuid.go | 294 - .../vendor/github.com/google/uuid/version1.go | 44 - .../vendor/github.com/google/uuid/version4.go | 76 - .../vendor/github.com/gorilla/mux/AUTHORS | 8 - .../vendor/github.com/gorilla/mux/LICENSE | 27 - .../vendor/github.com/gorilla/mux/README.md | 805 -- .../vendor/github.com/gorilla/mux/doc.go | 306 - .../github.com/gorilla/mux/middleware.go | 74 - .../vendor/github.com/gorilla/mux/mux.go | 606 -- .../vendor/github.com/gorilla/mux/regexp.go | 388 - .../vendor/github.com/gorilla/mux/route.go | 736 -- .../github.com/gorilla/mux/test_helpers.go | 19 - .../github.com/joho/godotenv/.gitignore | 1 - .../vendor/github.com/joho/godotenv/LICENCE | 23 - .../vendor/github.com/joho/godotenv/README.md | 202 - .../github.com/joho/godotenv/godotenv.go | 228 - .../vendor/github.com/joho/godotenv/parser.go | 271 - .../klauspost/compress/.gitattributes | 2 - .../github.com/klauspost/compress/.gitignore | 32 - .../klauspost/compress/.goreleaser.yml | 127 - .../github.com/klauspost/compress/LICENSE | 304 - .../github.com/klauspost/compress/README.md | 661 -- 
.../github.com/klauspost/compress/SECURITY.md | 25 - .../klauspost/compress/compressible.go | 85 - .../klauspost/compress/flate/deflate.go | 1017 --- .../klauspost/compress/flate/dict_decoder.go | 184 - .../klauspost/compress/flate/fast_encoder.go | 193 - .../compress/flate/huffman_bit_writer.go | 1182 --- .../klauspost/compress/flate/huffman_code.go | 417 - .../compress/flate/huffman_sortByFreq.go | 159 - .../compress/flate/huffman_sortByLiteral.go | 201 - .../klauspost/compress/flate/inflate.go | 829 -- .../klauspost/compress/flate/inflate_gen.go | 1283 --- .../klauspost/compress/flate/level1.go | 241 - .../klauspost/compress/flate/level2.go | 214 - .../klauspost/compress/flate/level3.go | 241 - .../klauspost/compress/flate/level4.go | 221 - .../klauspost/compress/flate/level5.go | 708 -- .../klauspost/compress/flate/level6.go | 325 - .../compress/flate/matchlen_amd64.go | 16 - .../klauspost/compress/flate/matchlen_amd64.s | 68 - .../compress/flate/matchlen_generic.go | 33 - .../klauspost/compress/flate/regmask_amd64.go | 37 - .../klauspost/compress/flate/regmask_other.go | 40 - .../klauspost/compress/flate/stateless.go | 318 - .../klauspost/compress/flate/token.go | 379 - .../klauspost/compress/fse/README.md | 79 - .../klauspost/compress/fse/bitreader.go | 122 - .../klauspost/compress/fse/bitwriter.go | 167 - .../klauspost/compress/fse/bytereader.go | 47 - .../klauspost/compress/fse/compress.go | 683 -- .../klauspost/compress/fse/decompress.go | 376 - .../github.com/klauspost/compress/fse/fse.go | 144 - .../github.com/klauspost/compress/gen.sh | 4 - .../klauspost/compress/huff0/.gitignore | 1 - .../klauspost/compress/huff0/README.md | 89 - .../klauspost/compress/huff0/bitreader.go | 229 - .../klauspost/compress/huff0/bitwriter.go | 102 - .../klauspost/compress/huff0/bytereader.go | 44 - .../klauspost/compress/huff0/compress.go | 741 -- .../klauspost/compress/huff0/decompress.go | 1167 --- .../compress/huff0/decompress_amd64.go | 226 - 
.../compress/huff0/decompress_amd64.s | 830 -- .../compress/huff0/decompress_generic.go | 299 - .../klauspost/compress/huff0/huff0.go | 337 - .../compress/internal/cpuinfo/cpuinfo.go | 34 - .../internal/cpuinfo/cpuinfo_amd64.go | 11 - .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 - .../compress/internal/snapref/LICENSE | 27 - .../compress/internal/snapref/decode.go | 264 - .../compress/internal/snapref/decode_other.go | 113 - .../compress/internal/snapref/encode.go | 289 - .../compress/internal/snapref/encode_other.go | 250 - .../compress/internal/snapref/snappy.go | 98 - .../github.com/klauspost/compress/s2sx.mod | 4 - .../github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 - .../klauspost/compress/zstd/bitreader.go | 136 - .../klauspost/compress/zstd/bitwriter.go | 112 - .../klauspost/compress/zstd/blockdec.go | 726 -- .../klauspost/compress/zstd/blockenc.go | 889 -- .../compress/zstd/blocktype_string.go | 85 - .../klauspost/compress/zstd/bytebuf.go | 131 - .../klauspost/compress/zstd/bytereader.go | 82 - .../klauspost/compress/zstd/decodeheader.go | 229 - .../klauspost/compress/zstd/decoder.go | 948 -- .../compress/zstd/decoder_options.go | 169 - .../klauspost/compress/zstd/dict.go | 534 -- .../klauspost/compress/zstd/enc_base.go | 173 - .../klauspost/compress/zstd/enc_best.go | 531 -- .../klauspost/compress/zstd/enc_better.go | 1242 --- .../klauspost/compress/zstd/enc_dfast.go | 1123 --- .../klauspost/compress/zstd/enc_fast.go | 891 -- .../klauspost/compress/zstd/encoder.go | 619 -- .../compress/zstd/encoder_options.go | 339 - .../klauspost/compress/zstd/framedec.go | 413 - .../klauspost/compress/zstd/frameenc.go | 137 - .../klauspost/compress/zstd/fse_decoder.go | 307 - .../compress/zstd/fse_decoder_amd64.go | 65 - .../compress/zstd/fse_decoder_amd64.s | 126 - .../compress/zstd/fse_decoder_generic.go | 72 - .../klauspost/compress/zstd/fse_encoder.go | 701 -- .../klauspost/compress/zstd/fse_predefined.go | 158 - 
.../klauspost/compress/zstd/hash.go | 35 - .../klauspost/compress/zstd/history.go | 116 - .../compress/zstd/internal/xxhash/LICENSE.txt | 22 - .../compress/zstd/internal/xxhash/README.md | 71 - .../compress/zstd/internal/xxhash/xxhash.go | 230 - .../zstd/internal/xxhash/xxhash_amd64.s | 210 - .../zstd/internal/xxhash/xxhash_arm64.s | 184 - .../zstd/internal/xxhash/xxhash_asm.go | 16 - .../zstd/internal/xxhash/xxhash_other.go | 76 - .../zstd/internal/xxhash/xxhash_safe.go | 11 - .../klauspost/compress/zstd/matchlen_amd64.go | 16 - .../klauspost/compress/zstd/matchlen_amd64.s | 68 - .../compress/zstd/matchlen_generic.go | 33 - .../klauspost/compress/zstd/seqdec.go | 503 -- .../klauspost/compress/zstd/seqdec_amd64.go | 394 - .../klauspost/compress/zstd/seqdec_amd64.s | 4175 --------- .../klauspost/compress/zstd/seqdec_generic.go | 237 - .../klauspost/compress/zstd/seqenc.go | 114 - .../klauspost/compress/zstd/snappy.go | 434 - .../github.com/klauspost/compress/zstd/zip.go | 141 - .../klauspost/compress/zstd/zstd.go | 121 - .../github.com/montanaflynn/stats/.gitignore | 2 - .../github.com/montanaflynn/stats/.travis.yml | 20 - .../montanaflynn/stats/CHANGELOG.md | 64 - .../github.com/montanaflynn/stats/LICENSE | 21 - .../github.com/montanaflynn/stats/Makefile | 29 - .../github.com/montanaflynn/stats/README.md | 103 - .../montanaflynn/stats/correlation.go | 33 - .../github.com/montanaflynn/stats/data.go | 140 - .../montanaflynn/stats/data_set_distances.go | 94 - .../montanaflynn/stats/deviation.go | 57 - .../github.com/montanaflynn/stats/errors.go | 22 - .../github.com/montanaflynn/stats/legacy.go | 36 - .../github.com/montanaflynn/stats/load.go | 184 - .../github.com/montanaflynn/stats/max.go | 24 - .../github.com/montanaflynn/stats/mean.go | 60 - .../github.com/montanaflynn/stats/median.go | 25 - .../github.com/montanaflynn/stats/min.go | 26 - .../github.com/montanaflynn/stats/mode.go | 47 - .../github.com/montanaflynn/stats/outlier.go | 44 - 
.../montanaflynn/stats/percentile.go | 80 - .../github.com/montanaflynn/stats/quartile.go | 74 - .../montanaflynn/stats/regression.go | 113 - .../github.com/montanaflynn/stats/round.go | 38 - .../github.com/montanaflynn/stats/sample.go | 44 - .../github.com/montanaflynn/stats/sum.go | 18 - .../github.com/montanaflynn/stats/util.go | 43 - .../github.com/montanaflynn/stats/variance.go | 105 - .../github.com/nats-io/nats.go/.gitignore | 45 - .../github.com/nats-io/nats.go/.golangci.yaml | 16 - .../github.com/nats-io/nats.go/.travis.yml | 36 - .../vendor/github.com/nats-io/nats.go/.words | 106 - .../github.com/nats-io/nats.go/.words.readme | 25 - .../nats-io/nats.go/CODE-OF-CONDUCT.md | 3 - .../nats-io/nats.go/CONTRIBUTING.md | 45 - .../github.com/nats-io/nats.go/GOVERNANCE.md | 3 - .../vendor/github.com/nats-io/nats.go/LICENSE | 201 - .../github.com/nats-io/nats.go/MAINTAINERS.md | 8 - .../github.com/nats-io/nats.go/README.md | 482 - .../github.com/nats-io/nats.go/context.go | 244 - .../nats-io/nats.go/dependencies.md | 15 - .../vendor/github.com/nats-io/nats.go/enc.go | 269 - .../nats.go/encoders/builtin/default_enc.go | 117 - .../nats.go/encoders/builtin/gob_enc.go | 45 - .../nats.go/encoders/builtin/json_enc.go | 56 - .../github.com/nats-io/nats.go/go_test.mod | 22 - .../github.com/nats-io/nats.go/go_test.sum | 47 - .../nats-io/nats.go/internal/parser/parse.go | 104 - .../nats-io/nats.go/jetstream/README.md | 989 --- .../nats-io/nats.go/jetstream/api.go | 151 - .../nats-io/nats.go/jetstream/consumer.go | 325 - .../nats.go/jetstream/consumer_config.go | 460 - .../nats-io/nats.go/jetstream/errors.go | 417 - .../nats-io/nats.go/jetstream/jetstream.go | 1048 --- .../nats.go/jetstream/jetstream_options.go | 408 - .../nats-io/nats.go/jetstream/kv.go | 1338 --- .../nats-io/nats.go/jetstream/kv_options.go | 103 - .../nats-io/nats.go/jetstream/message.go | 457 - .../nats-io/nats.go/jetstream/object.go | 1600 ---- .../nats.go/jetstream/object_options.go | 41 - 
.../nats-io/nats.go/jetstream/ordered.go | 624 -- .../nats-io/nats.go/jetstream/publish.go | 563 -- .../nats-io/nats.go/jetstream/pull.go | 1154 --- .../nats-io/nats.go/jetstream/stream.go | 719 -- .../nats.go/jetstream/stream_config.go | 606 -- .../vendor/github.com/nats-io/nats.go/js.go | 3848 -------- .../github.com/nats-io/nats.go/jserrors.go | 235 - .../vendor/github.com/nats-io/nats.go/jsm.go | 1675 ---- .../vendor/github.com/nats-io/nats.go/kv.go | 1178 --- .../nats-io/nats.go/legacy_jetstream.md | 83 - .../vendor/github.com/nats-io/nats.go/nats.go | 5694 ------------ .../github.com/nats-io/nats.go/netchan.go | 111 - .../github.com/nats-io/nats.go/object.go | 1422 --- .../github.com/nats-io/nats.go/parser.go | 554 -- .../vendor/github.com/nats-io/nats.go/rand.go | 29 - .../nats-io/nats.go/testing_internal.go | 59 - .../github.com/nats-io/nats.go/timer.go | 56 - .../github.com/nats-io/nats.go/util/tls.go | 28 - .../nats-io/nats.go/util/tls_go17.go | 50 - .../vendor/github.com/nats-io/nats.go/ws.go | 780 -- .../github.com/nats-io/nkeys/.gitignore | 16 - .../github.com/nats-io/nkeys/.goreleaser.yml | 63 - .../github.com/nats-io/nkeys/GOVERNANCE.md | 3 - .../vendor/github.com/nats-io/nkeys/LICENSE | 201 - .../github.com/nats-io/nkeys/MAINTAINERS.md | 8 - .../vendor/github.com/nats-io/nkeys/README.md | 68 - .../vendor/github.com/nats-io/nkeys/TODO.md | 5 - .../vendor/github.com/nats-io/nkeys/crc16.go | 68 - .../github.com/nats-io/nkeys/creds_utils.go | 78 - .../github.com/nats-io/nkeys/dependencies.md | 12 - .../vendor/github.com/nats-io/nkeys/errors.go | 50 - .../github.com/nats-io/nkeys/keypair.go | 146 - .../vendor/github.com/nats-io/nkeys/nkeys.go | 100 - .../vendor/github.com/nats-io/nkeys/public.go | 86 - .../vendor/github.com/nats-io/nkeys/strkey.go | 314 - .../vendor/github.com/nats-io/nkeys/xkeys.go | 185 - .../vendor/github.com/nats-io/nuid/.gitignore | 24 - .../github.com/nats-io/nuid/.travis.yml | 17 - .../github.com/nats-io/nuid/GOVERNANCE.md | 3 - 
.../vendor/github.com/nats-io/nuid/LICENSE | 201 - .../github.com/nats-io/nuid/MAINTAINERS.md | 6 - .../vendor/github.com/nats-io/nuid/README.md | 47 - .../vendor/github.com/nats-io/nuid/nuid.go | 135 - .../vendor/github.com/pkg/errors/.gitignore | 24 - .../vendor/github.com/pkg/errors/.travis.yml | 10 - .../vendor/github.com/pkg/errors/LICENSE | 23 - .../vendor/github.com/pkg/errors/Makefile | 44 - .../vendor/github.com/pkg/errors/README.md | 59 - .../vendor/github.com/pkg/errors/appveyor.yml | 32 - .../vendor/github.com/pkg/errors/errors.go | 288 - .../vendor/github.com/pkg/errors/go113.go | 38 - .../vendor/github.com/pkg/errors/stack.go | 177 - .../vendor/github.com/rs/cors/LICENSE | 19 - .../vendor/github.com/rs/cors/README.md | 116 - .../vendor/github.com/rs/cors/cors.go | 462 - .../vendor/github.com/rs/cors/utils.go | 71 - .../github.com/xdg-go/pbkdf2/.gitignore | 12 - .../vendor/github.com/xdg-go/pbkdf2/LICENSE | 175 - .../vendor/github.com/xdg-go/pbkdf2/README.md | 17 - .../vendor/github.com/xdg-go/pbkdf2/pbkdf2.go | 76 - .../vendor/github.com/xdg-go/scram/.gitignore | 0 .../github.com/xdg-go/scram/CHANGELOG.md | 22 - .../vendor/github.com/xdg-go/scram/LICENSE | 175 - .../vendor/github.com/xdg-go/scram/README.md | 72 - .../vendor/github.com/xdg-go/scram/client.go | 130 - .../github.com/xdg-go/scram/client_conv.go | 149 - .../vendor/github.com/xdg-go/scram/common.go | 97 - .../vendor/github.com/xdg-go/scram/doc.go | 26 - .../vendor/github.com/xdg-go/scram/parse.go | 205 - .../vendor/github.com/xdg-go/scram/scram.go | 71 - .../vendor/github.com/xdg-go/scram/server.go | 50 - .../github.com/xdg-go/scram/server_conv.go | 151 - .../github.com/xdg-go/stringprep/.gitignore | 0 .../github.com/xdg-go/stringprep/CHANGELOG.md | 29 - .../github.com/xdg-go/stringprep/LICENSE | 175 - .../github.com/xdg-go/stringprep/README.md | 28 - .../github.com/xdg-go/stringprep/bidi.go | 73 - .../github.com/xdg-go/stringprep/doc.go | 10 - .../github.com/xdg-go/stringprep/error.go | 14 
- .../github.com/xdg-go/stringprep/map.go | 21 - .../github.com/xdg-go/stringprep/profile.go | 75 - .../github.com/xdg-go/stringprep/saslprep.go | 52 - .../github.com/xdg-go/stringprep/set.go | 36 - .../github.com/xdg-go/stringprep/tables.go | 3215 ------- .../github.com/youmark/pkcs8/.gitignore | 23 - .../github.com/youmark/pkcs8/.travis.yml | 9 - .../vendor/github.com/youmark/pkcs8/LICENSE | 21 - .../vendor/github.com/youmark/pkcs8/README | 1 - .../vendor/github.com/youmark/pkcs8/README.md | 21 - .../vendor/github.com/youmark/pkcs8/pkcs8.go | 305 - .../go.mongodb.org/mongo-driver/LICENSE | 201 - .../go.mongodb.org/mongo-driver/bson/bson.go | 50 - .../bson/bsoncodec/array_codec.go | 50 - .../mongo-driver/bson/bsoncodec/bsoncodec.go | 238 - .../bson/bsoncodec/byte_slice_codec.go | 111 - .../bson/bsoncodec/cond_addr_codec.go | 63 - .../bson/bsoncodec/default_value_decoders.go | 1729 ---- .../bson/bsoncodec/default_value_encoders.go | 766 -- .../mongo-driver/bson/bsoncodec/doc.go | 90 - .../bson/bsoncodec/empty_interface_codec.go | 147 - .../mongo-driver/bson/bsoncodec/map_codec.go | 309 - .../mongo-driver/bson/bsoncodec/mode.go | 65 - .../bson/bsoncodec/pointer_codec.go | 109 - .../mongo-driver/bson/bsoncodec/proxy.go | 14 - .../mongo-driver/bson/bsoncodec/registry.go | 469 - .../bson/bsoncodec/slice_codec.go | 199 - .../bson/bsoncodec/string_codec.go | 119 - .../bson/bsoncodec/struct_codec.go | 664 -- .../bson/bsoncodec/struct_tag_parser.go | 139 - .../mongo-driver/bson/bsoncodec/time_codec.go | 127 - .../mongo-driver/bson/bsoncodec/types.go | 57 - .../mongo-driver/bson/bsoncodec/uint_codec.go | 173 - .../bsonoptions/byte_slice_codec_options.go | 38 - .../mongo-driver/bson/bsonoptions/doc.go | 8 - .../empty_interface_codec_options.go | 38 - .../bson/bsonoptions/map_codec_options.go | 67 - .../bson/bsonoptions/slice_codec_options.go | 38 - .../bson/bsonoptions/string_codec_options.go | 41 - .../bson/bsonoptions/struct_codec_options.go | 87 - 
.../bson/bsonoptions/time_codec_options.go | 38 - .../bson/bsonoptions/uint_codec_options.go | 38 - .../mongo-driver/bson/bsonrw/copier.go | 445 - .../mongo-driver/bson/bsonrw/doc.go | 9 - .../bson/bsonrw/extjson_parser.go | 806 -- .../bson/bsonrw/extjson_reader.go | 644 -- .../bson/bsonrw/extjson_tables.go | 223 - .../bson/bsonrw/extjson_wrappers.go | 492 - .../bson/bsonrw/extjson_writer.go | 732 -- .../mongo-driver/bson/bsonrw/json_scanner.go | 528 -- .../mongo-driver/bson/bsonrw/mode.go | 108 - .../mongo-driver/bson/bsonrw/reader.go | 63 - .../mongo-driver/bson/bsonrw/value_reader.go | 874 -- .../mongo-driver/bson/bsonrw/value_writer.go | 606 -- .../mongo-driver/bson/bsonrw/writer.go | 78 - .../mongo-driver/bson/bsontype/bsontype.go | 97 - .../mongo-driver/bson/decoder.go | 141 - .../go.mongodb.org/mongo-driver/bson/doc.go | 141 - .../mongo-driver/bson/encoder.go | 99 - .../mongo-driver/bson/marshal.go | 248 - .../mongo-driver/bson/primitive/decimal.go | 423 - .../mongo-driver/bson/primitive/objectid.go | 206 - .../mongo-driver/bson/primitive/primitive.go | 217 - .../mongo-driver/bson/primitive_codecs.go | 92 - .../go.mongodb.org/mongo-driver/bson/raw.go | 85 - .../mongo-driver/bson/raw_element.go | 51 - .../mongo-driver/bson/raw_value.go | 309 - .../mongo-driver/bson/registry.go | 24 - .../go.mongodb.org/mongo-driver/bson/types.go | 36 - .../mongo-driver/bson/unmarshal.go | 101 - .../go.mongodb.org/mongo-driver/event/doc.go | 56 - .../mongo-driver/event/monitoring.go | 190 - .../internal/background_context.go | 34 - .../internal/cancellation_listener.go | 47 - .../mongo-driver/internal/const.go | 19 - .../mongo-driver/internal/csfle_util.go | 39 - .../mongo-driver/internal/csot_util.go | 58 - .../mongo-driver/internal/error.go | 123 - .../mongo-driver/internal/http.go | 38 - .../internal/randutil/rand/bits.go | 38 - .../internal/randutil/rand/exp.go | 223 - .../internal/randutil/rand/normal.go | 158 - .../internal/randutil/rand/rand.go | 374 - 
.../internal/randutil/rand/rng.go | 93 - .../internal/randutil/randutil.go | 39 - .../mongo-driver/internal/string_util.go | 45 - .../internal/uri_validation_errors.go | 22 - .../mongo-driver/internal/uuid/uuid.go | 53 - .../mongo-driver/mongo/address/addr.go | 50 - .../mongo-driver/mongo/batch_cursor.go | 48 - .../mongo-driver/mongo/bulk_write.go | 532 -- .../mongo-driver/mongo/bulk_write_models.go | 305 - .../mongo-driver/mongo/change_stream.go | 707 -- .../mongo/change_stream_deployment.go | 49 - .../mongo-driver/mongo/client.go | 819 -- .../mongo-driver/mongo/client_encryption.go | 315 - .../mongo-driver/mongo/collection.go | 1842 ---- .../mongo-driver/mongo/crypt_retrievers.go | 65 - .../mongo-driver/mongo/cursor.go | 325 - .../mongo-driver/mongo/database.go | 795 -- .../mongo/description/description.go | 11 - .../mongo-driver/mongo/description/server.go | 488 - .../mongo/description/server_kind.go | 46 - .../mongo/description/server_selector.go | 341 - .../mongo/description/topology.go | 142 - .../mongo/description/topology_kind.go | 40 - .../mongo/description/topology_version.go | 66 - .../mongo/description/version_range.go | 42 - .../go.mongodb.org/mongo-driver/mongo/doc.go | 152 - .../mongo-driver/mongo/errors.go | 665 -- .../mongo/index_options_builder.go | 176 - .../mongo-driver/mongo/index_view.go | 486 - .../mongo-driver/mongo/mongo.go | 401 - .../mongo-driver/mongo/mongocryptd.go | 162 - .../mongo/options/aggregateoptions.go | 181 - .../mongo/options/autoencryptionoptions.go | 209 - .../mongo/options/bulkwriteoptions.go | 91 - .../mongo/options/changestreamoptions.go | 205 - .../mongo/options/clientencryptionoptions.go | 147 - .../mongo/options/clientoptions.go | 1153 --- .../mongo/options/collectionoptions.go | 88 - .../mongo/options/countoptions.go | 119 - .../mongo/options/createcollectionoptions.go | 326 - .../mongo/options/datakeyoptions.go | 101 - .../mongo-driver/mongo/options/dboptions.go | 88 - .../mongo/options/deleteoptions.go | 86 - 
.../mongo/options/distinctoptions.go | 78 - .../mongo-driver/mongo/options/doc.go | 8 - .../mongo/options/encryptoptions.go | 103 - .../mongo/options/estimatedcountoptions.go | 64 - .../mongo-driver/mongo/options/findoptions.go | 1095 --- .../mongo/options/gridfsoptions.go | 329 - .../mongo/options/indexoptions.go | 482 - .../mongo/options/insertoptions.go | 119 - .../mongo/options/listcollectionsoptions.go | 66 - .../mongo/options/listdatabasesoptions.go | 55 - .../mongo/options/mongooptions.go | 165 - .../mongo/options/replaceoptions.go | 115 - .../mongo/options/rewrapdatakeyoptions.go | 52 - .../mongo/options/runcmdoptions.go | 42 - .../mongo/options/serverapioptions.go | 60 - .../mongo/options/sessionoptions.go | 131 - .../mongo/options/transactionoptions.go | 100 - .../mongo/options/updateoptions.go | 128 - .../mongo/readconcern/readconcern.go | 83 - .../mongo-driver/mongo/readpref/mode.go | 88 - .../mongo-driver/mongo/readpref/options.go | 71 - .../mongo-driver/mongo/readpref/readpref.go | 135 - .../mongo-driver/mongo/results.go | 281 - .../mongo-driver/mongo/session.go | 385 - .../mongo-driver/mongo/single_result.go | 121 - .../go.mongodb.org/mongo-driver/mongo/util.go | 7 - .../mongo/writeconcern/writeconcern.go | 227 - .../go.mongodb.org/mongo-driver/tag/tag.go | 80 - .../mongo-driver/version/version.go | 11 - .../mongo-driver/x/bsonx/array.go | 97 - .../mongo-driver/x/bsonx/bsoncore/array.go | 164 - .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 - .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 - .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 862 -- .../mongo-driver/x/bsonx/bsoncore/document.go | 386 - .../x/bsonx/bsoncore/document_sequence.go | 189 - .../mongo-driver/x/bsonx/bsoncore/element.go | 152 - .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 - .../mongo-driver/x/bsonx/bsoncore/value.go | 980 -- .../mongo-driver/x/bsonx/constructor.go | 166 - .../mongo-driver/x/bsonx/document.go | 305 - .../mongo-driver/x/bsonx/element.go | 51 - 
.../mongo-driver/x/bsonx/mdocument.go | 231 - .../mongo-driver/x/bsonx/primitive_codecs.go | 637 -- .../x/bsonx/reflectionfree_d_codec.go | 1025 --- .../mongo-driver/x/bsonx/registry.go | 28 - .../mongo-driver/x/bsonx/value.go | 866 -- .../mongo-driver/x/mongo/driver/DESIGN.md | 23 - .../mongo-driver/x/mongo/driver/auth/auth.go | 229 - .../x/mongo/driver/auth/aws_conv.go | 348 - .../x/mongo/driver/auth/conversation.go | 31 - .../mongo-driver/x/mongo/driver/auth/cred.go | 16 - .../x/mongo/driver/auth/default.go | 98 - .../mongo-driver/x/mongo/driver/auth/doc.go | 23 - .../x/mongo/driver/auth/gssapi.go | 59 - .../x/mongo/driver/auth/gssapi_not_enabled.go | 17 - .../mongo/driver/auth/gssapi_not_supported.go | 22 - .../driver/auth/internal/awsv4/credentials.go | 63 - .../x/mongo/driver/auth/internal/awsv4/doc.go | 15 - .../driver/auth/internal/awsv4/request.go | 80 - .../mongo/driver/auth/internal/awsv4/rest.go | 46 - .../mongo/driver/auth/internal/awsv4/rules.go | 98 - .../driver/auth/internal/awsv4/signer.go | 472 - .../mongo/driver/auth/internal/gssapi/gss.go | 167 - .../driver/auth/internal/gssapi/gss_wrapper.c | 254 - .../driver/auth/internal/gssapi/gss_wrapper.h | 72 - .../mongo/driver/auth/internal/gssapi/sspi.go | 353 - .../auth/internal/gssapi/sspi_wrapper.c | 249 - .../auth/internal/gssapi/sspi_wrapper.h | 64 - .../x/mongo/driver/auth/mongodbaws.go | 82 - .../x/mongo/driver/auth/mongodbcr.go | 110 - .../mongo-driver/x/mongo/driver/auth/plain.go | 55 - .../mongo-driver/x/mongo/driver/auth/sasl.go | 174 - .../mongo-driver/x/mongo/driver/auth/scram.go | 130 - .../mongo-driver/x/mongo/driver/auth/util.go | 30 - .../mongo-driver/x/mongo/driver/auth/x509.go | 85 - .../x/mongo/driver/batch_cursor.go | 465 - .../mongo-driver/x/mongo/driver/batches.go | 76 - .../x/mongo/driver/compression.go | 111 - .../x/mongo/driver/connstring/connstring.go | 1036 --- .../mongo-driver/x/mongo/driver/crypt.go | 473 - .../mongo-driver/x/mongo/driver/dns/dns.go | 144 - 
.../mongo-driver/x/mongo/driver/driver.go | 274 - .../mongo-driver/x/mongo/driver/errors.go | 520 -- .../mongo-driver/x/mongo/driver/legacy.go | 22 - .../driver/list_collections_batch_cursor.go | 129 - .../x/mongo/driver/mongocrypt/binary.go | 56 - .../x/mongo/driver/mongocrypt/errors.go | 44 - .../driver/mongocrypt/errors_not_enabled.go | 21 - .../x/mongo/driver/mongocrypt/mongocrypt.go | 416 - .../driver/mongocrypt/mongocrypt_context.go | 115 - .../mongocrypt_context_not_enabled.go | 62 - .../mongocrypt/mongocrypt_kms_context.go | 76 - .../mongocrypt_kms_context_not_enabled.go | 39 - .../mongocrypt/mongocrypt_not_enabled.go | 83 - .../options/mongocrypt_context_options.go | 139 - .../mongocrypt/options/mongocrypt_options.go | 63 - .../x/mongo/driver/mongocrypt/state.go | 47 - .../mongo-driver/x/mongo/driver/ocsp/cache.go | 121 - .../x/mongo/driver/ocsp/config.go | 68 - .../mongo-driver/x/mongo/driver/ocsp/ocsp.go | 321 - .../x/mongo/driver/ocsp/options.go | 16 - .../mongo-driver/x/mongo/driver/operation.go | 1789 ---- .../driver/operation/abort_transaction.go | 199 - .../x/mongo/driver/operation/aggregate.go | 419 - .../x/mongo/driver/operation/command.go | 220 - .../driver/operation/commit_transaction.go | 201 - .../x/mongo/driver/operation/count.go | 311 - .../x/mongo/driver/operation/create.go | 402 - .../x/mongo/driver/operation/createIndexes.go | 278 - .../x/mongo/driver/operation/delete.go | 314 - .../x/mongo/driver/operation/distinct.go | 311 - .../mongo/driver/operation/drop_collection.go | 222 - .../x/mongo/driver/operation/drop_database.go | 154 - .../x/mongo/driver/operation/drop_indexes.go | 242 - .../x/mongo/driver/operation/end_sessions.go | 161 - .../x/mongo/driver/operation/errors.go | 13 - .../x/mongo/driver/operation/find.go | 548 -- .../mongo/driver/operation/find_and_modify.go | 477 - .../x/mongo/driver/operation/hello.go | 258 - .../x/mongo/driver/operation/insert.go | 293 - .../x/mongo/driver/operation/listDatabases.go | 327 - 
.../driver/operation/list_collections.go | 266 - .../x/mongo/driver/operation/list_indexes.go | 233 - .../x/mongo/driver/operation/update.go | 401 - .../x/mongo/driver/operation_exhaust.go | 37 - .../x/mongo/driver/serverapioptions.go | 36 - .../x/mongo/driver/session/client_session.go | 538 -- .../x/mongo/driver/session/cluster_clock.go | 36 - .../x/mongo/driver/session/options.go | 62 - .../x/mongo/driver/session/server_session.go | 74 - .../x/mongo/driver/session/session_pool.go | 192 - .../x/mongo/driver/topology/DESIGN.md | 40 - .../driver/topology/cancellation_listener.go | 14 - .../x/mongo/driver/topology/connection.go | 834 -- .../driver/topology/connection_legacy.go | 7 - .../driver/topology/connection_options.go | 214 - .../x/mongo/driver/topology/diff.go | 73 - .../x/mongo/driver/topology/errors.go | 111 - .../x/mongo/driver/topology/fsm.go | 438 - .../x/mongo/driver/topology/pool.go | 1135 --- .../topology/pool_generation_counter.go | 152 - .../x/mongo/driver/topology/rtt_monitor.go | 307 - .../x/mongo/driver/topology/server.go | 985 -- .../x/mongo/driver/topology/server_options.go | 195 - .../topology/tls_connection_source_1_16.go | 58 - .../topology/tls_connection_source_1_17.go | 47 - .../x/mongo/driver/topology/topology.go | 851 -- .../mongo/driver/topology/topology_options.go | 344 - .../x/mongo/driver/wiremessage/wiremessage.go | 600 -- .../vendor/golang.org/x/crypto/LICENSE | 27 - .../vendor/golang.org/x/crypto/PATENTS | 22 - .../golang.org/x/crypto/bcrypt/base64.go | 35 - .../golang.org/x/crypto/bcrypt/bcrypt.go | 304 - .../golang.org/x/crypto/blake2b/blake2b.go | 291 - .../x/crypto/blake2b/blake2bAVX2_amd64.go | 37 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 744 -- .../x/crypto/blake2b/blake2b_amd64.s | 278 - .../x/crypto/blake2b/blake2b_generic.go | 182 - .../x/crypto/blake2b/blake2b_ref.go | 11 - .../golang.org/x/crypto/blake2b/blake2x.go | 177 - .../golang.org/x/crypto/blake2b/register.go | 30 - .../golang.org/x/crypto/blowfish/block.go | 
159 - .../golang.org/x/crypto/blowfish/cipher.go | 99 - .../golang.org/x/crypto/blowfish/const.go | 199 - .../x/crypto/curve25519/curve25519.go | 59 - .../x/crypto/curve25519/curve25519_compat.go | 105 - .../x/crypto/curve25519/curve25519_go120.go | 46 - .../x/crypto/curve25519/internal/field/README | 7 - .../x/crypto/curve25519/internal/field/fe.go | 416 - .../curve25519/internal/field/fe_amd64.go | 15 - .../curve25519/internal/field/fe_amd64.s | 378 - .../internal/field/fe_amd64_noasm.go | 11 - .../curve25519/internal/field/fe_arm64.go | 15 - .../curve25519/internal/field/fe_arm64.s | 42 - .../internal/field/fe_arm64_noasm.go | 11 - .../curve25519/internal/field/fe_generic.go | 264 - .../curve25519/internal/field/sync.checkpoint | 1 - .../crypto/curve25519/internal/field/sync.sh | 19 - .../golang.org/x/crypto/ed25519/ed25519.go | 71 - .../x/crypto/internal/alias/alias.go | 31 - .../x/crypto/internal/alias/alias_purego.go | 34 - .../x/crypto/internal/poly1305/mac_noasm.go | 9 - .../x/crypto/internal/poly1305/poly1305.go | 99 - .../x/crypto/internal/poly1305/sum_amd64.go | 47 - .../x/crypto/internal/poly1305/sum_amd64.s | 108 - .../x/crypto/internal/poly1305/sum_generic.go | 312 - .../x/crypto/internal/poly1305/sum_ppc64le.go | 47 - .../x/crypto/internal/poly1305/sum_ppc64le.s | 181 - .../x/crypto/internal/poly1305/sum_s390x.go | 76 - .../x/crypto/internal/poly1305/sum_s390x.s | 503 -- .../golang.org/x/crypto/nacl/box/box.go | 182 - .../x/crypto/nacl/secretbox/secretbox.go | 173 - .../vendor/golang.org/x/crypto/ocsp/ocsp.go | 792 -- .../golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 - .../x/crypto/salsa20/salsa/hsalsa20.go | 146 - .../x/crypto/salsa20/salsa/salsa208.go | 201 - .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 - .../x/crypto/salsa20/salsa/salsa20_amd64.s | 880 -- .../x/crypto/salsa20/salsa/salsa20_noasm.go | 14 - .../x/crypto/salsa20/salsa/salsa20_ref.go | 233 - .../vendor/golang.org/x/net/LICENSE | 27 - .../vendor/golang.org/x/net/PATENTS | 22 - 
.../golang.org/x/net/context/context.go | 56 - .../vendor/golang.org/x/net/context/go17.go | 73 - .../vendor/golang.org/x/net/context/go19.go | 21 - .../golang.org/x/net/context/pre_go17.go | 301 - .../golang.org/x/net/context/pre_go19.go | 110 - .../vendor/golang.org/x/sync/AUTHORS | 3 - .../vendor/golang.org/x/sync/CONTRIBUTORS | 3 - .../vendor/golang.org/x/sync/LICENSE | 27 - .../vendor/golang.org/x/sync/PATENTS | 22 - .../golang.org/x/sync/errgroup/errgroup.go | 66 - .../vendor/golang.org/x/sys/LICENSE | 27 - .../vendor/golang.org/x/sys/PATENTS | 22 - .../golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 - .../vendor/golang.org/x/sys/cpu/byteorder.go | 66 - .../vendor/golang.org/x/sys/cpu/cpu.go | 290 - .../vendor/golang.org/x/sys/cpu/cpu_aix.go | 33 - .../vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 - .../vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 - .../vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 - .../golang.org/x/sys/cpu/cpu_gc_arm64.go | 11 - .../golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 - .../vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 15 - .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 - .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 - .../golang.org/x/sys/cpu/cpu_gccgo_x86.c | 37 - .../golang.org/x/sys/cpu/cpu_gccgo_x86.go | 31 - .../vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 - .../golang.org/x/sys/cpu/cpu_linux_arm.go | 39 - .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 111 - .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 - .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 - .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 30 - .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 - .../golang.org/x/sys/cpu/cpu_loong64.go | 12 - .../golang.org/x/sys/cpu/cpu_mips64x.go | 15 - .../vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 - .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 - .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 - .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 - .../golang.org/x/sys/cpu/cpu_other_arm.go | 9 - 
.../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 - .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 11 - .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 - .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 11 - .../vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 - .../golang.org/x/sys/cpu/cpu_riscv64.go | 11 - .../vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 - .../vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 - .../vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 - .../vendor/golang.org/x/sys/cpu/cpu_x86.go | 151 - .../vendor/golang.org/x/sys/cpu/cpu_x86.s | 26 - .../vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 - .../golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 - .../vendor/golang.org/x/sys/cpu/endian_big.go | 10 - .../golang.org/x/sys/cpu/endian_little.go | 10 - .../golang.org/x/sys/cpu/hwcap_linux.go | 71 - .../vendor/golang.org/x/sys/cpu/parse.go | 43 - .../x/sys/cpu/proc_cpuinfo_linux.go | 53 - .../golang.org/x/sys/cpu/runtime_auxv.go | 16 - .../x/sys/cpu/runtime_auxv_go121.go | 18 - .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 26 - .../x/sys/cpu/syscall_aix_ppc64_gc.go | 35 - .../vendor/golang.org/x/text/LICENSE | 27 - .../vendor/golang.org/x/text/PATENTS | 22 - .../vendor/golang.org/x/text/cases/cases.go | 162 - .../vendor/golang.org/x/text/cases/context.go | 376 - .../vendor/golang.org/x/text/cases/fold.go | 34 - .../vendor/golang.org/x/text/cases/icu.go | 61 - .../vendor/golang.org/x/text/cases/info.go | 82 - .../vendor/golang.org/x/text/cases/map.go | 816 -- .../golang.org/x/text/cases/tables10.0.0.go | 2255 ----- .../golang.org/x/text/cases/tables11.0.0.go | 2316 ----- .../golang.org/x/text/cases/tables12.0.0.go | 2359 ----- .../golang.org/x/text/cases/tables13.0.0.go | 2399 ----- .../golang.org/x/text/cases/tables15.0.0.go | 2527 ------ .../golang.org/x/text/cases/tables9.0.0.go | 2215 ----- .../vendor/golang.org/x/text/cases/trieval.go | 217 - .../golang.org/x/text/internal/internal.go | 49 - .../x/text/internal/language/common.go | 16 - 
.../x/text/internal/language/compact.go | 29 - .../text/internal/language/compact/compact.go | 61 - .../internal/language/compact/language.go | 260 - .../text/internal/language/compact/parents.go | 120 - .../text/internal/language/compact/tables.go | 1015 --- .../x/text/internal/language/compact/tags.go | 91 - .../x/text/internal/language/compose.go | 167 - .../x/text/internal/language/coverage.go | 28 - .../x/text/internal/language/language.go | 627 -- .../x/text/internal/language/lookup.go | 412 - .../x/text/internal/language/match.go | 226 - .../x/text/internal/language/parse.go | 608 -- .../x/text/internal/language/tables.go | 3494 -------- .../x/text/internal/language/tags.go | 48 - .../golang.org/x/text/internal/match.go | 67 - .../golang.org/x/text/internal/tag/tag.go | 100 - .../golang.org/x/text/language/coverage.go | 187 - .../vendor/golang.org/x/text/language/doc.go | 98 - .../golang.org/x/text/language/language.go | 605 -- .../golang.org/x/text/language/match.go | 735 -- .../golang.org/x/text/language/parse.go | 256 - .../golang.org/x/text/language/tables.go | 298 - .../vendor/golang.org/x/text/language/tags.go | 145 - .../golang.org/x/text/transform/transform.go | 709 -- .../x/text/unicode/norm/composition.go | 512 -- .../x/text/unicode/norm/forminfo.go | 279 - .../golang.org/x/text/unicode/norm/input.go | 109 - .../golang.org/x/text/unicode/norm/iter.go | 458 - .../x/text/unicode/norm/normalize.go | 610 -- .../x/text/unicode/norm/readwriter.go | 125 - .../x/text/unicode/norm/tables10.0.0.go | 7657 ---------------- .../x/text/unicode/norm/tables11.0.0.go | 7693 ---------------- .../x/text/unicode/norm/tables12.0.0.go | 7710 ---------------- .../x/text/unicode/norm/tables13.0.0.go | 7760 ---------------- .../x/text/unicode/norm/tables15.0.0.go | 7907 ----------------- .../x/text/unicode/norm/tables9.0.0.go | 7637 ---------------- .../x/text/unicode/norm/transform.go | 88 - .../golang.org/x/text/unicode/norm/trie.go | 54 - 
.../vendor/google.golang.org/protobuf/LICENSE | 27 - .../vendor/google.golang.org/protobuf/PATENTS | 22 - .../protobuf/encoding/prototext/decode.go | 772 -- .../protobuf/encoding/prototext/doc.go | 7 - .../protobuf/encoding/prototext/encode.go | 376 - .../protobuf/encoding/protowire/wire.go | 547 -- .../protobuf/internal/descfmt/stringer.go | 413 - .../protobuf/internal/descopts/options.go | 29 - .../protobuf/internal/detrand/rand.go | 69 - .../internal/editiondefaults/defaults.go | 12 - .../editiondefaults/editions_defaults.binpb | 4 - .../internal/encoding/defval/default.go | 213 - .../encoding/messageset/messageset.go | 242 - .../protobuf/internal/encoding/tag/tag.go | 207 - .../protobuf/internal/encoding/text/decode.go | 686 -- .../internal/encoding/text/decode_number.go | 211 - .../internal/encoding/text/decode_string.go | 161 - .../internal/encoding/text/decode_token.go | 373 - .../protobuf/internal/encoding/text/doc.go | 29 - .../protobuf/internal/encoding/text/encode.go | 272 - .../protobuf/internal/errors/errors.go | 89 - .../protobuf/internal/errors/is_go112.go | 40 - .../protobuf/internal/errors/is_go113.go | 13 - .../protobuf/internal/filedesc/build.go | 157 - .../protobuf/internal/filedesc/desc.go | 703 -- .../protobuf/internal/filedesc/desc_init.go | 523 -- .../protobuf/internal/filedesc/desc_lazy.go | 732 -- .../protobuf/internal/filedesc/desc_list.go | 457 - .../internal/filedesc/desc_list_gen.go | 356 - .../protobuf/internal/filedesc/editions.go | 142 - .../protobuf/internal/filedesc/placeholder.go | 109 - .../protobuf/internal/filetype/build.go | 296 - .../protobuf/internal/flags/flags.go | 24 - .../internal/flags/proto_legacy_disable.go | 10 - .../internal/flags/proto_legacy_enable.go | 10 - .../protobuf/internal/genid/any_gen.go | 34 - .../protobuf/internal/genid/api_gen.go | 106 - .../protobuf/internal/genid/descriptor_gen.go | 1233 --- .../protobuf/internal/genid/doc.go | 11 - .../protobuf/internal/genid/duration_gen.go | 34 - 
.../protobuf/internal/genid/empty_gen.go | 19 - .../protobuf/internal/genid/field_mask_gen.go | 31 - .../internal/genid/go_features_gen.go | 31 - .../protobuf/internal/genid/goname.go | 25 - .../protobuf/internal/genid/map_entry.go | 16 - .../internal/genid/source_context_gen.go | 31 - .../protobuf/internal/genid/struct_gen.go | 121 - .../protobuf/internal/genid/timestamp_gen.go | 34 - .../protobuf/internal/genid/type_gen.go | 228 - .../protobuf/internal/genid/wrappers.go | 13 - .../protobuf/internal/genid/wrappers_gen.go | 175 - .../protobuf/internal/impl/api_export.go | 177 - .../protobuf/internal/impl/checkinit.go | 141 - .../protobuf/internal/impl/codec_extension.go | 215 - .../protobuf/internal/impl/codec_field.go | 830 -- .../protobuf/internal/impl/codec_gen.go | 5724 ------------ .../protobuf/internal/impl/codec_map.go | 388 - .../protobuf/internal/impl/codec_map_go111.go | 38 - .../protobuf/internal/impl/codec_map_go112.go | 12 - .../protobuf/internal/impl/codec_message.go | 217 - .../internal/impl/codec_messageset.go | 123 - .../protobuf/internal/impl/codec_reflect.go | 210 - .../protobuf/internal/impl/codec_tables.go | 557 -- .../protobuf/internal/impl/codec_unsafe.go | 18 - .../protobuf/internal/impl/convert.go | 495 -- .../protobuf/internal/impl/convert_list.go | 141 - .../protobuf/internal/impl/convert_map.go | 121 - .../protobuf/internal/impl/decode.go | 285 - .../protobuf/internal/impl/encode.go | 201 - .../protobuf/internal/impl/enum.go | 21 - .../protobuf/internal/impl/extension.go | 156 - .../protobuf/internal/impl/legacy_enum.go | 218 - .../protobuf/internal/impl/legacy_export.go | 92 - .../internal/impl/legacy_extension.go | 176 - .../protobuf/internal/impl/legacy_file.go | 81 - .../protobuf/internal/impl/legacy_message.go | 568 -- .../protobuf/internal/impl/merge.go | 176 - .../protobuf/internal/impl/merge_gen.go | 209 - .../protobuf/internal/impl/message.go | 284 - .../protobuf/internal/impl/message_reflect.go | 463 - 
.../internal/impl/message_reflect_field.go | 543 -- .../internal/impl/message_reflect_gen.go | 249 - .../protobuf/internal/impl/pointer_reflect.go | 215 - .../protobuf/internal/impl/pointer_unsafe.go | 215 - .../protobuf/internal/impl/validate.go | 576 -- .../protobuf/internal/impl/weak.go | 74 - .../protobuf/internal/order/order.go | 89 - .../protobuf/internal/order/range.go | 115 - .../protobuf/internal/pragma/pragma.go | 29 - .../protobuf/internal/set/ints.go | 58 - .../protobuf/internal/strs/strings.go | 196 - .../protobuf/internal/strs/strings_pure.go | 28 - .../internal/strs/strings_unsafe_go120.go | 95 - .../internal/strs/strings_unsafe_go121.go | 74 - .../protobuf/internal/version/version.go | 79 - .../protobuf/proto/checkinit.go | 71 - .../protobuf/proto/decode.go | 294 - .../protobuf/proto/decode_gen.go | 603 -- .../google.golang.org/protobuf/proto/doc.go | 86 - .../protobuf/proto/encode.go | 322 - .../protobuf/proto/encode_gen.go | 97 - .../google.golang.org/protobuf/proto/equal.go | 57 - .../protobuf/proto/extension.go | 92 - .../google.golang.org/protobuf/proto/merge.go | 139 - .../protobuf/proto/messageset.go | 93 - .../google.golang.org/protobuf/proto/proto.go | 45 - .../protobuf/proto/proto_methods.go | 20 - .../protobuf/proto/proto_reflect.go | 20 - .../google.golang.org/protobuf/proto/reset.go | 43 - .../google.golang.org/protobuf/proto/size.go | 101 - .../protobuf/proto/size_gen.go | 55 - .../protobuf/proto/wrappers.go | 29 - .../protobuf/reflect/protoreflect/methods.go | 78 - .../protobuf/reflect/protoreflect/proto.go | 513 -- .../protobuf/reflect/protoreflect/source.go | 129 - .../reflect/protoreflect/source_gen.go | 552 -- .../protobuf/reflect/protoreflect/type.go | 666 -- .../protobuf/reflect/protoreflect/value.go | 285 - .../reflect/protoreflect/value_equal.go | 168 - .../reflect/protoreflect/value_pure.go | 60 - .../reflect/protoreflect/value_union.go | 438 - .../protoreflect/value_unsafe_go120.go | 99 - 
.../protoreflect/value_unsafe_go121.go | 87 - .../reflect/protoregistry/registry.go | 882 -- .../protobuf/runtime/protoiface/legacy.go | 15 - .../protobuf/runtime/protoiface/methods.go | 168 - .../protobuf/runtime/protoimpl/impl.go | 44 - .../protobuf/runtime/protoimpl/version.go | 60 - .../services/controller/vendor/modules.txt | 160 - 899 files changed, 15 insertions(+), 264983 deletions(-) delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/README.md delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/claims.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/doc.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/hmac.go delete mode 100644 
backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/map_claims.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/none.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser_option.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/signing_method.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token_option.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/types.go delete mode 100644 backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/validator.go delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/AUTHORS delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/CONTRIBUTORS delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/README delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/decode.go delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.go delete mode 100644 
backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/decode_other.go delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/encode.go delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.go delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/encode_other.go delete mode 100644 backend/services/controller/vendor/github.com/golang/snappy/snappy.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/.travis.yml delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/README.md delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/dce.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/doc.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/hash.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/marshal.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/node.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/node_js.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/node_net.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/null.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/sql.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/time.go delete mode 100644 
backend/services/controller/vendor/github.com/google/uuid/util.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/uuid.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/version1.go delete mode 100644 backend/services/controller/vendor/github.com/google/uuid/version4.go delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/AUTHORS delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/README.md delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/doc.go delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/middleware.go delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/mux.go delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/regexp.go delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/route.go delete mode 100644 backend/services/controller/vendor/github.com/gorilla/mux/test_helpers.go delete mode 100644 backend/services/controller/vendor/github.com/joho/godotenv/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/joho/godotenv/LICENCE delete mode 100644 backend/services/controller/vendor/github.com/joho/godotenv/README.md delete mode 100644 backend/services/controller/vendor/github.com/joho/godotenv/godotenv.go delete mode 100644 backend/services/controller/vendor/github.com/joho/godotenv/parser.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/.gitattributes delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/.goreleaser.yml delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/LICENSE delete mode 100644 
backend/services/controller/vendor/github.com/klauspost/compress/README.md delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/SECURITY.md delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/compressible.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/deflate.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/dict_decoder.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/fast_encoder.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_code.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate_gen.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/level1.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/level2.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/level3.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/level4.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/level5.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/level6.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go delete mode 100644 
backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_generic.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_amd64.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_other.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/stateless.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/flate/token.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/fse/README.md delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/fse/bitreader.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/fse/bitwriter.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/fse/bytereader.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/fse/compress.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/fse/decompress.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/fse/fse.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/gen.sh delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/README.md delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitreader.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitwriter.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/bytereader.go delete mode 100644 
backend/services/controller/vendor/github.com/klauspost/compress/huff0/compress.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_generic.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/huff0/huff0.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/snappy.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/s2sx.mod delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/s2sx.sum delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/README.md delete mode 100644 
backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitreader.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitwriter.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockdec.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockenc.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/blocktype_string.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytebuf.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytereader.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/decodeheader.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder_options.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/dict.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_base.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_best.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_better.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_dfast.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_fast.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder_options.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/framedec.go delete mode 100644 
backend/services/controller/vendor/github.com/klauspost/compress/zstd/frameenc.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_encoder.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_predefined.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/hash.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/history.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go delete mode 100644 
backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqenc.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/snappy.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/zip.go delete mode 100644 backend/services/controller/vendor/github.com/klauspost/compress/zstd/zstd.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/.travis.yml delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/CHANGELOG.md delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/Makefile delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/README.md delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/correlation.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/data.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/data_set_distances.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/deviation.go delete mode 100644 
backend/services/controller/vendor/github.com/montanaflynn/stats/errors.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/legacy.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/load.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/max.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/mean.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/median.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/min.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/mode.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/outlier.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/percentile.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/quartile.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/regression.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/round.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/sample.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/sum.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/util.go delete mode 100644 backend/services/controller/vendor/github.com/montanaflynn/stats/variance.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/.golangci.yaml delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/.travis.yml delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/.words delete mode 100644 
backend/services/controller/vendor/github.com/nats-io/nats.go/.words.readme delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/CONTRIBUTING.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/GOVERNANCE.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/MAINTAINERS.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/README.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/context.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/dependencies.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/enc.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.mod delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.sum delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/internal/parser/parse.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/README.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/api.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go delete mode 100644 
backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/errors.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/message.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object_options.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/ordered.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/publish.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/pull.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/js.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jserrors.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/jsm.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/kv.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/legacy_jetstream.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/nats.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/netchan.go delete mode 100644 
backend/services/controller/vendor/github.com/nats-io/nats.go/object.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/parser.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/rand.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/testing_internal.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/timer.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls_go17.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nats.go/ws.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/.goreleaser.yml delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/GOVERNANCE.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/MAINTAINERS.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/README.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/TODO.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/crc16.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/creds_utils.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/dependencies.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/errors.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/keypair.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/nkeys.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/public.go delete 
mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/strkey.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nkeys/xkeys.go delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nuid/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nuid/.travis.yml delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nuid/GOVERNANCE.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nuid/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nuid/MAINTAINERS.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nuid/README.md delete mode 100644 backend/services/controller/vendor/github.com/nats-io/nuid/nuid.go delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/.travis.yml delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/Makefile delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/README.md delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/appveyor.yml delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/errors.go delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/go113.go delete mode 100644 backend/services/controller/vendor/github.com/pkg/errors/stack.go delete mode 100644 backend/services/controller/vendor/github.com/rs/cors/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/rs/cors/README.md delete mode 100644 backend/services/controller/vendor/github.com/rs/cors/cors.go delete mode 100644 backend/services/controller/vendor/github.com/rs/cors/utils.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/pbkdf2/.gitignore delete mode 
100644 backend/services/controller/vendor/github.com/xdg-go/pbkdf2/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/pbkdf2/README.md delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/CHANGELOG.md delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/README.md delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/client.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/client_conv.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/common.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/doc.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/parse.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/scram.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/server.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/scram/server_conv.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/CHANGELOG.md delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/README.md delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/bidi.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/doc.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/error.go delete mode 100644 
backend/services/controller/vendor/github.com/xdg-go/stringprep/map.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/profile.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/saslprep.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/set.go delete mode 100644 backend/services/controller/vendor/github.com/xdg-go/stringprep/tables.go delete mode 100644 backend/services/controller/vendor/github.com/youmark/pkcs8/.gitignore delete mode 100644 backend/services/controller/vendor/github.com/youmark/pkcs8/.travis.yml delete mode 100644 backend/services/controller/vendor/github.com/youmark/pkcs8/LICENSE delete mode 100644 backend/services/controller/vendor/github.com/youmark/pkcs8/README delete mode 100644 backend/services/controller/vendor/github.com/youmark/pkcs8/README.md delete mode 100644 backend/services/controller/vendor/github.com/youmark/pkcs8/pkcs8.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/LICENSE delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bson.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go 
delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/decoder.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/doc.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/encoder.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/marshal.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/raw.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/registry.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/types.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/event/doc.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/event/monitoring.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/background_context.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/cancellation_listener.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/const.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/error.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/http.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/string_util.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/client.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/collection.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/crypt_retrievers.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/database.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/server_kind.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_kind.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/topology_version.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/description/version_range.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/doc.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/errors.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/index_options_builder.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/doc.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/serverapioptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/readpref/mode.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/results.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/session.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/util.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/tag/tag.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/version/version.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/array.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/constructor.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/cred.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/credentials.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/doc.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/request.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rest.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/list_collections_batch_cursor.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/state.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/cache.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/errors.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_exhaust.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/serverapioptions.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/cluster_clock.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/cancellation_listener.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/diff.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go delete mode 100644 backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go delete mode 100644 
backend/services/controller/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/LICENSE delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/PATENTS delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/bcrypt/base64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/bcrypt/bcrypt.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/blake2b.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/blake2x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blake2b/register.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blowfish/block.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blowfish/cipher.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/blowfish/const.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/README delete mode 100644 
backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/alias/alias.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/alias/alias_purego.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go delete mode 100644 
backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/nacl/box/box.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/ocsp/ocsp.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go delete mode 100644 backend/services/controller/vendor/golang.org/x/net/LICENSE delete mode 100644 backend/services/controller/vendor/golang.org/x/net/PATENTS delete mode 100644 backend/services/controller/vendor/golang.org/x/net/context/context.go delete mode 100644 backend/services/controller/vendor/golang.org/x/net/context/go17.go delete mode 100644 backend/services/controller/vendor/golang.org/x/net/context/go19.go delete mode 100644 backend/services/controller/vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 backend/services/controller/vendor/golang.org/x/net/context/pre_go19.go 
delete mode 100644 backend/services/controller/vendor/golang.org/x/sync/AUTHORS delete mode 100644 backend/services/controller/vendor/golang.org/x/sync/CONTRIBUTORS delete mode 100644 backend/services/controller/vendor/golang.org/x/sync/LICENSE delete mode 100644 backend/services/controller/vendor/golang.org/x/sync/PATENTS delete mode 100644 backend/services/controller/vendor/golang.org/x/sync/errgroup/errgroup.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/LICENSE delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/PATENTS delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/byteorder.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_aix.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_arm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_arm64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_linux.go delete mode 100644 
backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_loong64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_mips64x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_mipsx.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_other_arm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_riscv64.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_s390x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_s390x.s delete mode 100644 
backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_wasm.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_x86.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_x86.s delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_zos.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/endian_big.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/endian_little.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/hwcap_linux.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/parse.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/runtime_auxv.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go delete mode 100644 backend/services/controller/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/LICENSE delete mode 100644 backend/services/controller/vendor/golang.org/x/text/PATENTS delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/cases.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/context.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/fold.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/icu.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/info.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/map.go delete mode 100644 
backend/services/controller/vendor/golang.org/x/text/cases/tables10.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/tables11.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/tables12.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/tables13.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/tables15.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/tables9.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/cases/trieval.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/internal.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/common.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/compact.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/compact/compact.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/compact/language.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/compact/parents.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/compact/tables.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/compact/tags.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/compose.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/coverage.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/language.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/lookup.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/match.go delete mode 100644 
backend/services/controller/vendor/golang.org/x/text/internal/language/parse.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/tables.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/language/tags.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/match.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/internal/tag/tag.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/language/coverage.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/language/doc.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/language/language.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/language/match.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/language/parse.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/language/tables.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/language/tags.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/transform/transform.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/composition.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/forminfo.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/input.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/iter.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/normalize.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/readwriter.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go delete mode 100644 
backend/services/controller/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/transform.go delete mode 100644 backend/services/controller/vendor/golang.org/x/text/unicode/norm/trie.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/LICENSE delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/PATENTS delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/encoding/prototext/decode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/encoding/prototext/doc.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/encoding/prototext/encode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/encoding/protowire/wire.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/descopts/options.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/detrand/rand.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go delete mode 100644 
backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/errors/errors.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/errors/is_go112.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/errors/is_go113.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/build.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/desc.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go delete mode 100644 
backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/editions.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/filetype/build.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/flags/flags.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/any_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/api_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/doc.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/goname.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/map_entry.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go delete mode 
100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/type_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/wrappers.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/api_export.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/checkinit.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_field.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_map.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_message.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/convert.go delete mode 100644 
backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/convert_list.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/convert_map.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/decode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/encode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/enum.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/extension.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/merge.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/message.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go delete mode 100644 
backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/validate.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/impl/weak.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/order/order.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/order/range.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/pragma/pragma.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/set/ints.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/strs/strings.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/internal/version/version.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/checkinit.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/decode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/decode_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/doc.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/encode.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/encode_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/equal.go delete mode 100644 
backend/services/controller/vendor/google.golang.org/protobuf/proto/extension.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/merge.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/messageset.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/proto.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/proto_methods.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/proto_reflect.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/reset.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/size.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/size_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/proto/wrappers.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go 
delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go delete mode 100644 backend/services/controller/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go delete mode 100644 backend/services/controller/vendor/modules.txt diff --git a/backend/services/controller/go.mod b/backend/services/controller/go.mod index 77ab144..783333c 100644 --- a/backend/services/controller/go.mod +++ b/backend/services/controller/go.mod @@ -10,8 +10,8 @@ require ( github.com/nats-io/nats.go v1.33.1 github.com/rs/cors v1.9.0 go.mongodb.org/mongo-driver v1.11.3 - golang.org/x/crypto v0.18.0 - golang.org/x/net v0.17.0 + golang.org/x/crypto v0.24.0 + golang.org/x/net v0.26.0 google.golang.org/protobuf v1.33.0 ) @@ -27,7 +27,7 @@ require ( github.com/xdg-go/scram v1.1.1 // indirect github.com/xdg-go/stringprep v1.0.3 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect ) diff --git a/backend/services/controller/go.sum b/backend/services/controller/go.sum index 87f5336..1108006 100644 --- a/backend/services/controller/go.sum +++ b/backend/services/controller/go.sum @@ 
-52,23 +52,24 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 
h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/.gitignore deleted file mode 100644 index 09573e0..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin -.idea/ - diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/LICENSE b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/LICENSE deleted file mode 100644 index 35dbc25..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) 2012 Dave Grijalva -Copyright (c) 2021 golang-jwt maintainers - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without 
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md deleted file mode 100644 index ff9c57e..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md +++ /dev/null @@ -1,195 +0,0 @@ -# Migration Guide (v5.0.0) - -Version `v5` contains a major rework of core functionalities in the `jwt-go` -library. This includes support for several validation options as well as a -re-design of the `Claims` interface. Lastly, we reworked how errors work under -the hood, which should provide a better overall developer experience. - -Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0), -the import path will be: - - "github.com/golang-jwt/jwt/v5" - -For most users, changing the import path *should* suffice. However, since we -intentionally changed and cleaned some of the public API, existing programs -might need to be updated. The following sections describe significant changes -and corresponding updates for existing programs. 
- -## Parsing and Validation Options - -Under the hood, a new `Validator` struct takes care of validating the claims. A -long awaited feature has been the option to fine-tune the validation of tokens. -This is now possible with several `ParserOption` functions that can be appended -to most `Parse` functions, such as `ParseWithClaims`. The most important options -and changes are: - * Added `WithLeeway` to support specifying the leeway that is allowed when - validating time-based claims, such as `exp` or `nbf`. - * Changed default behavior to not check the `iat` claim. Usage of this claim - is OPTIONAL according to the JWT RFC. The claim itself is also purely - informational according to the RFC, so a strict validation failure is not - recommended. If you want to check for sensible values in these claims, - please use the `WithIssuedAt` parser option. - * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for - expected `aud`, `sub` and `iss`. - * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow - previously global settings to enable base64 strict encoding and the parsing - of base64 strings with padding. The latter is strictly speaking against the - standard, but unfortunately some of the major identity providers issue some - of these incorrect tokens. Both options are disabled by default. - -## Changes to the `Claims` interface - -### Complete Restructuring - -Previously, the claims interface was satisfied with an implementation of a -`Valid() error` function. This had several issues: - * The different claim types (struct claims, map claims, etc.) then contained - similar (but not 100 % identical) code of how this validation was done. This - lead to a lot of (almost) duplicate code and was hard to maintain - * It was not really semantically close to what a "claim" (or a set of claims) - really is; which is a list of defined key/value pairs with a certain - semantic meaning. 
- -Since all the validation functionality is now extracted into the validator, all -`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface. -Instead, the interface now represents a list of getters to retrieve values with -a specific meaning. This allows us to completely decouple the validation logic -with the underlying storage representation of the claim, which could be a -struct, a map or even something stored in a database. - -```go -type Claims interface { - GetExpirationTime() (*NumericDate, error) - GetIssuedAt() (*NumericDate, error) - GetNotBefore() (*NumericDate, error) - GetIssuer() (string, error) - GetSubject() (string, error) - GetAudience() (ClaimStrings, error) -} -``` - -Users that previously directly called the `Valid` function on their claims, -e.g., to perform validation independently of parsing/verifying a token, can now -use the `jwt.NewValidator` function to create a `Validator` independently of the -`Parser`. - -```go -var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second)) -v.Validate(myClaims) -``` - -### Supported Claim Types and Removal of `StandardClaims` - -The two standard claim types supported by this library, `MapClaims` and -`RegisteredClaims` both implement the necessary functions of this interface. The -old `StandardClaims` struct, which has already been deprecated in `v4` is now -removed. - -Users using custom claims, in most cases, will not experience any changes in the -behavior as long as they embedded `RegisteredClaims`. If they created a new -claim type from scratch, they now need to implemented the proper getter -functions. - -### Migrating Application Specific Logic of the old `Valid` - -Previously, users could override the `Valid` method in a custom claim, for -example to extend the validation with application-specific claims. However, this -was always very dangerous, since once could easily disable the standard -validation and signature checking. 
- -In order to avoid that, while still supporting the use-case, a new -`ClaimsValidator` interface has been introduced. This interface consists of the -`Validate() error` function. If the validator sees, that a `Claims` struct -implements this interface, the errors returned to the `Validate` function will -be *appended* to the regular standard validation. It is not possible to disable -the standard validation anymore (even only by accident). - -Usage examples can be found in [example_test.go](./example_test.go), to build -claims structs like the following. - -```go -// MyCustomClaims includes all registered claims, plus Foo. -type MyCustomClaims struct { - Foo string `json:"foo"` - jwt.RegisteredClaims -} - -// Validate can be used to execute additional application-specific claims -// validation. -func (m MyCustomClaims) Validate() error { - if m.Foo != "bar" { - return errors.New("must be foobar") - } - - return nil -} -``` - -## Changes to the `Token` and `Parser` struct - -The previously global functions `DecodeSegment` and `EncodeSegment` were moved -to the `Parser` and `Token` struct respectively. This will allow us in the -future to configure the behavior of these two based on options supplied on the -parser or the token (creation). This also removes two previously global -variables and moves them to parser options `WithStrictDecoding` and -`WithPaddingAllowed`. - -In order to do that, we had to adjust the way signing methods work. Previously -they were given a base64 encoded signature in `Verify` and were expected to -return a base64 encoded version of the signature in `Sign`, both as a `string`. -However, this made it necessary to have `DecodeSegment` and `EncodeSegment` -global and was a less than perfect design because we were repeating -encoding/decoding steps for all signing methods. Now, `Sign` and `Verify` -operate on a decoded signature as a `[]byte`, which feels more natural for a -cryptographic operation anyway. 
Lastly, `Parse` and `SignedString` take care of -the final encoding/decoding part. - -In addition to that, we also changed the `Signature` field on `Token` from a -`string` to `[]byte` and this is also now populated with the decoded form. This -is also more consistent, because the other parts of the JWT, mainly `Header` and -`Claims` were already stored in decoded form in `Token`. Only the signature was -stored in base64 encoded form, which was redundant with the information in the -`Raw` field, which contains the complete token as base64. - -```go -type Token struct { - Raw string // Raw contains the raw token - Method SigningMethod // Method is the signing method used or to be used - Header map[string]interface{} // Header is the first segment of the token in decoded form - Claims Claims // Claims is the second segment of the token in decoded form - Signature []byte // Signature is the third segment of the token in decoded form - Valid bool // Valid specifies if the token is valid -} -``` - -Most (if not all) of these changes should not impact the normal usage of this -library. Only users directly accessing the `Signature` field as well as -developers of custom signing methods should be affected. - -# Migration Guide (v4.0.0) - -Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), -the import path will be: - - "github.com/golang-jwt/jwt/v4" - -The `/v4` version will be backwards compatible with existing `v3.x.y` tags in -this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should -be a drop-in replacement, if you're having troubles migrating, please open an -issue. - -You can replace all occurrences of `github.com/dgrijalva/jwt-go` or -`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually -or by using tools such as `sed` or `gofmt`. 
- -And then you'd typically run: - -``` -go get github.com/golang-jwt/jwt/v4 -go mod tidy -``` - -# Older releases (before v3.2.0) - -The original migration guide for older releases can be found at -https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/README.md b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/README.md deleted file mode 100644 index 964598a..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# jwt-go - -[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) -[![Go -Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v5.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v5) -[![Coverage Status](https://coveralls.io/repos/github/golang-jwt/jwt/badge.svg?branch=main)](https://coveralls.io/github/golang-jwt/jwt?branch=main) - -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) -implementation of [JSON Web -Tokens](https://datatracker.ietf.org/doc/html/rfc7519). - -Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) -this project adds Go module support, but maintains backwards compatibility with -older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the -[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version -v5.0.0 introduces major improvements to the validation of tokens, but is not -entirely backwards compatible. - -> After the original author of the library suggested migrating the maintenance -> of `jwt-go`, a dedicated team of open source maintainers decided to clone the -> existing library into this repository. See -> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a -> detailed discussion on this topic. 
- - -**SECURITY NOTICE:** Some older versions of Go have a security issue in the -crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue -[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more -detail. - -**SECURITY NOTICE:** It's important that you [validate the `alg` presented is -what you -expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). -This library attempts to make it easy to do the right thing by requiring key -types match the expected alg, but you should take the extra step to verify it in -your usage. See the examples provided. - -### Supported Go versions - -Our support of Go versions is aligned with Go's [version release -policy](https://golang.org/doc/devel/release#policy). So we will support a major -version of Go until there are two newer major releases. We no longer support -building jwt-go with unsupported Go versions, as these contain security -vulnerabilities which will not be fixed. - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web -Tokens. - -In short, it's a signed JSON object that does something useful (for example, -authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is -made of three parts, separated by `.`'s. The first two parts are JSON objects, -that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) -encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for -verifying the last part, the signature. For example, which encryption method -was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and -contains the actual stuff you care about. Refer to [RFC -7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about -reserved keys and the proper way to add your own. - -## What's in the box? 
- -This library supports the parsing and verification as well as the generation and -signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, -RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Installation Guidelines - -1. To install the jwt package, you first need to have - [Go](https://go.dev/doc/install) installed, then you can use the command - below to add `jwt-go` as a dependency in your Go program. - -```sh -go get -u github.com/golang-jwt/jwt/v5 -``` - -2. Import it in your code: - -```go -import "github.com/golang-jwt/jwt/v5" -``` - -## Usage - -A detailed usage guide, including how to sign and verify tokens can be found on -our [documentation website](https://golang-jwt.github.io/jwt/usage/create/). - -## Examples - -See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5) -for examples of usage: - -* [Simple example of parsing and validating a - token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac) -* [Simple example of building and signing a - token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac) -* [Directory of - Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples) - -## Compliance - -This library was last reviewed to comply with [RFC -7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few -notable differences: - -* In order to protect against accidental use of [Unsecured - JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using - `alg=none` will only be accepted if the constant - `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are -appreciated. The API should be considered stable. There should be very few -backwards-incompatible changes outside of major version updates (and only with -good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). 
Accepted pull -requests will land on `main`. Periodically, versions will be tagged from -`main`. You can find all the releases on [the project releases -page](https://github.com/golang-jwt/jwt/releases). - -**BREAKING CHANGES:*** A full list of breaking changes is available in -`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating -your code. - -## Extensions - -This library publishes all the necessary components for adding your own signing -methods or key functions. Simply implement the `SigningMethod` interface and -register a factory method using `RegisterSigningMethod` or provide a -`jwt.Keyfunc`. - -A common use case would be integrating with different 3rd party signature -providers, like key management services from various cloud providers or Hardware -Security Modules (HSMs) or to implement additional standards. - -| Extension | Purpose | Repo | -| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | -| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | -| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | -| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | - -*Disclaimer*: Unless otherwise specified, these integrations are maintained by -third parties and should not be considered as a primary offer by any of the -mentioned cloud providers - -## More - -Go package documentation can be found [on -pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional -documentation can be found on [our project -page](https://golang-jwt.github.io/jwt/). 
- -The command line utility included in this project (cmd/jwt) provides a -straightforward example of token creation and parsing as well as a useful tool -for debugging your own integration. You'll also find several implementation -examples in the documentation. - -[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version -of the JWT logo, which is distributed under the terms of the [MIT -License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt). diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md deleted file mode 100644 index b08402c..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md +++ /dev/null @@ -1,19 +0,0 @@ -# Security Policy - -## Supported Versions - -As of February 2022 (and until this document is updated), the latest version `v4` is supported. - -## Reporting a Vulnerability - -If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s). - -You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. - -## Public Discussions - -Please avoid publicly discussing a potential security vulnerability. - -Let's take this offline and find a solution first, this limits the potential impact as much as possible. - -We appreciate your help! 
diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md deleted file mode 100644 index b5039e4..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md +++ /dev/null @@ -1,137 +0,0 @@ -# `jwt-go` Version History - -The following version history is kept for historic purposes. To retrieve the current changes of each version, please refer to the change-log of the specific release versions on https://github.com/golang-jwt/jwt/releases. - -## 4.0.0 - -* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`. - -## 3.2.2 - -* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). -* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). -* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). -* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). - -## 3.2.1 - -* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code - * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` -* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). 
This fixes CVE-2020-26160 - -#### 3.2.0 - -* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation -* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate -* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. -* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. - -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. - * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. 
- * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. 
- -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. 
- * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -## 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. No functional changes - -## 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -## 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/claims.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/claims.go deleted file mode 100644 index d50ff3d..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/claims.go +++ /dev/null @@ -1,16 +0,0 @@ -package jwt - -// Claims represent any form of a JWT Claims Set according to -// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a -// common basis for validation, it is required that an implementation is able to -// supply at least the claim names provided in -// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`, -// `iat`, `nbf`, `iss`, `sub` and `aud`. 
-type Claims interface { - GetExpirationTime() (*NumericDate, error) - GetIssuedAt() (*NumericDate, error) - GetNotBefore() (*NumericDate, error) - GetIssuer() (string, error) - GetSubject() (string, error) - GetAudience() (ClaimStrings, error) -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/doc.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/doc.go deleted file mode 100644 index a86dc1a..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. -package jwt diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go deleted file mode 100644 index ca85659..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go +++ /dev/null @@ -1,134 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// SigningMethodECDSA implements the ECDSA family of signing methods. 
-// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Verify implements token verification for the SigningMethod. -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error { - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType) - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { - return nil - } - - return ErrECDSAVerification -} - -// Sign implements token signing for the SigningMethod. 
-// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType) - } - - // Create the hasher - if !m.Hash.Available() { - return nil, ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return nil, ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outputs (r and s) into big-endian byte arrays - // padded with zeros on the left to make sure the sizes work out. - // Output must be 2*keyBytes long. - out := make([]byte, 2*keyBytes) - r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. - s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
- - return out, nil - } else { - return nil, err - } -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go deleted file mode 100644 index 5700636..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") -) - -// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, 
ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519.go deleted file mode 100644 index c213811..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519.go +++ /dev/null @@ -1,79 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ed25519" - "crypto/rand" - "errors" -) - -var ( - ErrEd25519Verification = errors.New("ed25519: verification error") -) - -// SigningMethodEd25519 implements the EdDSA family. -// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification -type SigningMethodEd25519 struct{} - -// Specific instance for EdDSA -var ( - SigningMethodEdDSA *SigningMethodEd25519 -) - -func init() { - SigningMethodEdDSA = &SigningMethodEd25519{} - RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { - return SigningMethodEdDSA - }) -} - -func (m *SigningMethodEd25519) Alg() string { - return "EdDSA" -} - -// Verify implements token verification for the SigningMethod. -// For this verify method, key must be an ed25519.PublicKey -func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error { - var ed25519Key ed25519.PublicKey - var ok bool - - if ed25519Key, ok = key.(ed25519.PublicKey); !ok { - return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType) - } - - if len(ed25519Key) != ed25519.PublicKeySize { - return ErrInvalidKey - } - - // Verify the signature - if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { - return ErrEd25519Verification - } - - return nil -} - -// Sign implements token signing for the SigningMethod. 
-// For this signing method, key must be an ed25519.PrivateKey -func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) { - var ed25519Key crypto.Signer - var ok bool - - if ed25519Key, ok = key.(crypto.Signer); !ok { - return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType) - } - - if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { - return nil, ErrInvalidKey - } - - // Sign the string and return the result. ed25519 performs a two-pass hash - // as part of its algorithm. Therefore, we need to pass a non-prehashed - // message into the Sign function, as indicated by crypto.Hash(0) - sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) - if err != nil { - return nil, err - } - - return sig, nil -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go deleted file mode 100644 index cdb5e68..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go +++ /dev/null @@ -1,64 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ed25519" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") - ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") -) - -// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key -func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey ed25519.PrivateKey - var ok bool - if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { - return nil, ErrNotEdPrivateKey - } - - return 
pkey, nil -} - -// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key -func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - return nil, err - } - - var pkey ed25519.PublicKey - var ok bool - if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { - return nil, ErrNotEdPublicKey - } - - return pkey, nil -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors.go deleted file mode 100644 index 23bb616..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors.go +++ /dev/null @@ -1,49 +0,0 @@ -package jwt - -import ( - "errors" - "strings" -) - -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") - ErrTokenMalformed = errors.New("token is malformed") - ErrTokenUnverifiable = errors.New("token is unverifiable") - ErrTokenSignatureInvalid = errors.New("token signature is invalid") - ErrTokenRequiredClaimMissing = errors.New("token is missing required claim") - ErrTokenInvalidAudience = errors.New("token has invalid audience") - ErrTokenExpired = errors.New("token is expired") - ErrTokenUsedBeforeIssued = errors.New("token used before issued") - ErrTokenInvalidIssuer = errors.New("token has invalid issuer") - ErrTokenInvalidSubject = errors.New("token has invalid subject") - ErrTokenNotValidYet = errors.New("token is not valid yet") - ErrTokenInvalidId = errors.New("token has invalid id") - ErrTokenInvalidClaims = errors.New("token has invalid claims") - ErrInvalidType = errors.New("invalid type for 
claim") -) - -// joinedError is an error type that works similar to what [errors.Join] -// produces, with the exception that it has a nice error string; mainly its -// error messages are concatenated using a comma, rather than a newline. -type joinedError struct { - errs []error -} - -func (je joinedError) Error() string { - msg := []string{} - for _, err := range je.errs { - msg = append(msg, err.Error()) - } - - return strings.Join(msg, ", ") -} - -// joinErrors joins together multiple errors. Useful for scenarios where -// multiple errors next to each other occur, e.g., in claims validation. -func joinErrors(errs ...error) error { - return &joinedError{ - errs: errs, - } -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go deleted file mode 100644 index a893d35..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build go1.20 -// +build go1.20 - -package jwt - -import ( - "fmt" -) - -// Unwrap implements the multiple error unwrapping for this error type, which is -// possible in Go 1.20. -func (je joinedError) Unwrap() []error { - return je.errs -} - -// newError creates a new error message with a detailed error message. The -// message will be prefixed with the contents of the supplied error type. -// Additionally, more errors, that provide more context can be supplied which -// will be appended to the message. This makes use of Go 1.20's possibility to -// include more than one %w formatting directive in [fmt.Errorf]. 
-// -// For example, -// -// newError("no keyfunc was provided", ErrTokenUnverifiable) -// -// will produce the error string -// -// "token is unverifiable: no keyfunc was provided" -func newError(message string, err error, more ...error) error { - var format string - var args []any - if message != "" { - format = "%w: %s" - args = []any{err, message} - } else { - format = "%w" - args = []any{err} - } - - for _, e := range more { - format += ": %w" - args = append(args, e) - } - - err = fmt.Errorf(format, args...) - return err -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go deleted file mode 100644 index 2ad542f..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go +++ /dev/null @@ -1,78 +0,0 @@ -//go:build !go1.20 -// +build !go1.20 - -package jwt - -import ( - "errors" - "fmt" -) - -// Is implements checking for multiple errors using [errors.Is], since multiple -// error unwrapping is not possible in versions less than Go 1.20. -func (je joinedError) Is(err error) bool { - for _, e := range je.errs { - if errors.Is(e, err) { - return true - } - } - - return false -} - -// wrappedErrors is a workaround for wrapping multiple errors in environments -// where Go 1.20 is not available. It basically uses the already implemented -// functionality of joinedError to handle multiple errors with supplies a -// custom error message that is identical to the one we produce in Go 1.20 using -// multiple %w directives. -type wrappedErrors struct { - msg string - joinedError -} - -// Error returns the stored error string -func (we wrappedErrors) Error() string { - return we.msg -} - -// newError creates a new error message with a detailed error message. The -// message will be prefixed with the contents of the supplied error type. 
-// Additionally, more errors, that provide more context can be supplied which -// will be appended to the message. Since we cannot use of Go 1.20's possibility -// to include more than one %w formatting directive in [fmt.Errorf], we have to -// emulate that. -// -// For example, -// -// newError("no keyfunc was provided", ErrTokenUnverifiable) -// -// will produce the error string -// -// "token is unverifiable: no keyfunc was provided" -func newError(message string, err error, more ...error) error { - // We cannot wrap multiple errors here with %w, so we have to be a little - // bit creative. Basically, we are using %s instead of %w to produce the - // same error message and then throw the result into a custom error struct. - var format string - var args []any - if message != "" { - format = "%s: %s" - args = []any{err, message} - } else { - format = "%s" - args = []any{err} - } - errs := []error{err} - - for _, e := range more { - format += ": %s" - args = append(args, e) - errs = append(errs, e) - } - - err = &wrappedErrors{ - msg: fmt.Sprintf(format, args...), - joinedError: joinedError{errs: errs}, - } - return err -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/hmac.go deleted file mode 100644 index 96c6272..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/hmac.go +++ /dev/null @@ -1,104 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// SigningMethodHMAC implements the HMAC-SHA family of signing methods. 
-// Expects key type of []byte for both signing and validation -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify implements token verification for the SigningMethod. Returns nil if -// the signature is valid. Key must be []byte. -// -// Note it is not advised to provide a []byte which was converted from a 'human -// readable' string using a subset of ASCII characters. To maximize entropy, you -// should ideally be providing a []byte key which was produced from a -// cryptographically random source, e.g. crypto/rand. Additional information -// about this, and why we intentionally are not supporting string as a key can -// be found on our usage guide -// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types. -func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return newError("HMAC verify expects []byte", ErrInvalidKeyType) - } - - // Can we use the specified hashing method? 
- if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Sign implements token signing for the SigningMethod. Key must be []byte. -// -// Note it is not advised to provide a []byte which was converted from a 'human -// readable' string using a subset of ASCII characters. To maximize entropy, you -// should ideally be providing a []byte key which was produced from a -// cryptographically random source, e.g. crypto/rand. Additional information -// about this, and why we intentionally are not supporting string as a key can -// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/. -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return hasher.Sum(nil), nil - } - - return nil, ErrInvalidKeyType -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/map_claims.go deleted file mode 100644 index b2b51a1..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/map_claims.go +++ /dev/null @@ -1,109 +0,0 @@ -package jwt - -import ( - "encoding/json" - "fmt" -) - -// MapClaims is a claims type that uses the map[string]interface{} for JSON -// decoding. 
This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// GetExpirationTime implements the Claims interface. -func (m MapClaims) GetExpirationTime() (*NumericDate, error) { - return m.parseNumericDate("exp") -} - -// GetNotBefore implements the Claims interface. -func (m MapClaims) GetNotBefore() (*NumericDate, error) { - return m.parseNumericDate("nbf") -} - -// GetIssuedAt implements the Claims interface. -func (m MapClaims) GetIssuedAt() (*NumericDate, error) { - return m.parseNumericDate("iat") -} - -// GetAudience implements the Claims interface. -func (m MapClaims) GetAudience() (ClaimStrings, error) { - return m.parseClaimsString("aud") -} - -// GetIssuer implements the Claims interface. -func (m MapClaims) GetIssuer() (string, error) { - return m.parseString("iss") -} - -// GetSubject implements the Claims interface. -func (m MapClaims) GetSubject() (string, error) { - return m.parseString("sub") -} - -// parseNumericDate tries to parse a key in the map claims type as a number -// date. This will succeed, if the underlying type is either a [float64] or a -// [json.Number]. Otherwise, nil will be returned. -func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) { - v, ok := m[key] - if !ok { - return nil, nil - } - - switch exp := v.(type) { - case float64: - if exp == 0 { - return nil, nil - } - - return newNumericDateFromSeconds(exp), nil - case json.Number: - v, _ := exp.Float64() - - return newNumericDateFromSeconds(v), nil - } - - return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType) -} - -// parseClaimsString tries to parse a key in the map claims type as a -// [ClaimsStrings] type, which can either be a string or an array of string. 
-func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) { - var cs []string - switch v := m[key].(type) { - case string: - cs = append(cs, v) - case []string: - cs = v - case []interface{}: - for _, a := range v { - vs, ok := a.(string) - if !ok { - return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType) - } - cs = append(cs, vs) - } - } - - return cs, nil -} - -// parseString tries to parse a key in the map claims type as a [string] type. -// If the key does not exist, an empty string is returned. If the key has the -// wrong type, an error is returned. -func (m MapClaims) parseString(key string) (string, error) { - var ( - ok bool - raw interface{} - iss string - ) - raw, ok = m[key] - if !ok { - return "", nil - } - - iss, ok = raw.(string) - if !ok { - return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType) - } - - return iss, nil -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/none.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/none.go deleted file mode 100644 index 685c2ea..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/none.go +++ /dev/null @@ -1,50 +0,0 @@ -package jwt - -// SigningMethodNone implements the none signing method. This is required by the spec -// but you probably should never use it. 
-var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if len(sig) != 0 { - return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable) - } - - // Accept 'none' signing method. 
- return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return []byte{}, nil - } - - return nil, NoneSignatureTypeDisallowedError -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser.go deleted file mode 100644 index ecf99af..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser.go +++ /dev/null @@ -1,238 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - // If populated, only these methods will be considered valid. - validMethods []string - - // Use JSON Number format in JSON decoder. - useJSONNumber bool - - // Skip claims validation during token parsing. - skipClaimsValidation bool - - validator *Validator - - decodeStrict bool - - decodePaddingAllowed bool -} - -// NewParser creates a new Parser with the specified options -func NewParser(options ...ParserOption) *Parser { - p := &Parser{ - validator: &Validator{}, - } - - // Loop through our parsing options and apply them - for _, option := range options { - option(p) - } - - return p -} - -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the key for validating. -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims -// interface. This provides default values which can be overridden and allows a caller to use their own type, rather -// than the default MapClaims implementation of Claims. 
-// -// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), -// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the -// proper memory for it before passing in the overall claims, otherwise you might run into a panic. -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - token, parts, err := p.ParseUnverified(tokenString, claims) - if err != nil { - return token, err - } - - // Verify signing method is in the required set - if p.validMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.validMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid) - } - } - - // Decode signature - token.Signature, err = p.DecodeSegment(parts[2]) - if err != nil { - return token, newError("could not base64 decode signature", ErrTokenMalformed, err) - } - text := strings.Join(parts[0:2], ".") - - // Lookup key(s) - if keyFunc == nil { - // keyFunc was not provided. short circuiting validation - return token, newError("no keyfunc was provided", ErrTokenUnverifiable) - } - - got, err := keyFunc(token) - if err != nil { - return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) - } - - switch have := got.(type) { - case VerificationKeySet: - if len(have.Keys) == 0 { - return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable) - } - // Iterate through keys and verify signature, skipping the rest when a match is found. - // Return the last error if no match is found. 
- for _, key := range have.Keys { - if err = token.Method.Verify(text, token.Signature, key); err == nil { - break - } - } - default: - err = token.Method.Verify(text, token.Signature, have) - } - if err != nil { - return token, newError("", ErrTokenSignatureInvalid, err) - } - - // Validate Claims - if !p.skipClaimsValidation { - // Make sure we have at least a default validator - if p.validator == nil { - p.validator = NewValidator() - } - - if err := p.validator.Validate(claims); err != nil { - return token, newError("", ErrTokenInvalidClaims, err) - } - } - - // No errors so far, token is valid. - token.Valid = true - - return token, nil -} - -// ParseUnverified parses the token but doesn't validate the signature. -// -// WARNING: Don't use this method unless you know what you're doing. -// -// It's only ever useful in cases where you know the signature is valid (since it has already -// been or will be checked elsewhere in the stack) and you want to extract values from it. -func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed) - } - - token = &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = p.DecodeSegment(parts[0]); err != nil { - return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err) - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err) - } - - // parse Claims - token.Claims = claims - - claimBytes, err := p.DecodeSegment(parts[1]) - if err != nil { - return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err) - } - - // If `useJSONNumber` is enabled then we must use *json.Decoder to decode - // the claims. 
However, this comes with a performance penalty so only use - // it if we must and, otherwise, simple use json.Unmarshal. - if !p.useJSONNumber { - // JSON Unmarshal. Special case for map type to avoid weird pointer behavior. - if c, ok := token.Claims.(MapClaims); ok { - err = json.Unmarshal(claimBytes, &c) - } else { - err = json.Unmarshal(claimBytes, &claims) - } - } else { - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - dec.UseNumber() - // JSON Decode. Special case for map type to avoid weird pointer behavior. - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - } - if err != nil { - return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err) - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable) - } - } else { - return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable) - } - - return token, parts, nil -} - -// DecodeSegment decodes a JWT specific base64url encoding. This function will -// take into account whether the [Parser] is configured with additional options, -// such as [WithStrictDecoding] or [WithPaddingAllowed]. -func (p *Parser) DecodeSegment(seg string) ([]byte, error) { - encoding := base64.RawURLEncoding - - if p.decodePaddingAllowed { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - encoding = base64.URLEncoding - } - - if p.decodeStrict { - encoding = encoding.Strict() - } - return encoding.DecodeString(seg) -} - -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the cryptographic key -// for verifying the signature. 
The caller is strongly encouraged to set the -// WithValidMethods option to validate the 'alg' claim in the token matches the -// expected algorithm. For more details about the importance of validating the -// 'alg' claim, see -// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ -func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { - return NewParser(options...).Parse(tokenString, keyFunc) -} - -// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). -// -// Note: If you provide a custom claim implementation that embeds one of the -// standard claims (such as RegisteredClaims), make sure that a) you either -// embed a non-pointer version of the claims or b) if you are using a pointer, -// allocate the proper memory for it before passing in the overall claims, -// otherwise you might run into a panic. -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { - return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser_option.go deleted file mode 100644 index 88a780f..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/parser_option.go +++ /dev/null @@ -1,128 +0,0 @@ -package jwt - -import "time" - -// ParserOption is used to implement functional-style options that modify the -// behavior of the parser. To add new options, just create a function (ideally -// beginning with With or Without) that returns an anonymous function that takes -// a *Parser type as input and manipulates its configuration accordingly. -type ParserOption func(*Parser) - -// WithValidMethods is an option to supply algorithm methods that the parser -// will check. Only those methods will be considered valid. 
It is heavily -// encouraged to use this option in order to prevent attacks such as -// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. -func WithValidMethods(methods []string) ParserOption { - return func(p *Parser) { - p.validMethods = methods - } -} - -// WithJSONNumber is an option to configure the underlying JSON parser with -// UseNumber. -func WithJSONNumber() ParserOption { - return func(p *Parser) { - p.useJSONNumber = true - } -} - -// WithoutClaimsValidation is an option to disable claims validation. This -// option should only be used if you exactly know what you are doing. -func WithoutClaimsValidation() ParserOption { - return func(p *Parser) { - p.skipClaimsValidation = true - } -} - -// WithLeeway returns the ParserOption for specifying the leeway window. -func WithLeeway(leeway time.Duration) ParserOption { - return func(p *Parser) { - p.validator.leeway = leeway - } -} - -// WithTimeFunc returns the ParserOption for specifying the time func. The -// primary use-case for this is testing. If you are looking for a way to account -// for clock-skew, WithLeeway should be used instead. -func WithTimeFunc(f func() time.Time) ParserOption { - return func(p *Parser) { - p.validator.timeFunc = f - } -} - -// WithIssuedAt returns the ParserOption to enable verification -// of issued-at. -func WithIssuedAt() ParserOption { - return func(p *Parser) { - p.validator.verifyIat = true - } -} - -// WithExpirationRequired returns the ParserOption to make exp claim required. -// By default exp claim is optional. -func WithExpirationRequired() ParserOption { - return func(p *Parser) { - p.validator.requireExp = true - } -} - -// WithAudience configures the validator to require the specified audience in -// the `aud` claim. Validation will fail if the audience is not listed in the -// token or the `aud` claim is missing. -// -// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is -// application-specific. 
Since this validation API is helping developers in -// writing secure application, we decided to REQUIRE the existence of the claim, -// if an audience is expected. -func WithAudience(aud string) ParserOption { - return func(p *Parser) { - p.validator.expectedAud = aud - } -} - -// WithIssuer configures the validator to require the specified issuer in the -// `iss` claim. Validation will fail if a different issuer is specified in the -// token or the `iss` claim is missing. -// -// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is -// application-specific. Since this validation API is helping developers in -// writing secure application, we decided to REQUIRE the existence of the claim, -// if an issuer is expected. -func WithIssuer(iss string) ParserOption { - return func(p *Parser) { - p.validator.expectedIss = iss - } -} - -// WithSubject configures the validator to require the specified subject in the -// `sub` claim. Validation will fail if a different subject is specified in the -// token or the `sub` claim is missing. -// -// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is -// application-specific. Since this validation API is helping developers in -// writing secure application, we decided to REQUIRE the existence of the claim, -// if a subject is expected. -func WithSubject(sub string) ParserOption { - return func(p *Parser) { - p.validator.expectedSub = sub - } -} - -// WithPaddingAllowed will enable the codec used for decoding JWTs to allow -// padding. Note that the JWS RFC7515 states that the tokens will utilize a -// Base64url encoding with no padding. Unfortunately, some implementations of -// JWT are producing non-standard tokens, and thus require support for decoding. -func WithPaddingAllowed() ParserOption { - return func(p *Parser) { - p.decodePaddingAllowed = true - } -} - -// WithStrictDecoding will switch the codec used for decoding JWTs into strict -// mode. 
In this mode, the decoder requires that trailing padding bits are zero, -// as described in RFC 4648 section 3.5. -func WithStrictDecoding() ParserOption { - return func(p *Parser) { - p.decodeStrict = true - } -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go deleted file mode 100644 index 77951a5..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go +++ /dev/null @@ -1,63 +0,0 @@ -package jwt - -// RegisteredClaims are a structured version of the JWT Claims Set, -// restricted to Registered Claim Names, as referenced at -// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 -// -// This type can be used on its own, but then additional private and -// public claims embedded in the JWT will not be parsed. The typical use-case -// therefore is to embedded this in a user-defined claim type. -// -// See examples for how to use this with your own claim types. -type RegisteredClaims struct { - // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 - Issuer string `json:"iss,omitempty"` - - // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 - Subject string `json:"sub,omitempty"` - - // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 - Audience ClaimStrings `json:"aud,omitempty"` - - // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 - ExpiresAt *NumericDate `json:"exp,omitempty"` - - // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 - NotBefore *NumericDate `json:"nbf,omitempty"` - - // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 - IssuedAt *NumericDate `json:"iat,omitempty"` - - // the `jti` (JWT ID) claim. 
See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 - ID string `json:"jti,omitempty"` -} - -// GetExpirationTime implements the Claims interface. -func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) { - return c.ExpiresAt, nil -} - -// GetNotBefore implements the Claims interface. -func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) { - return c.NotBefore, nil -} - -// GetIssuedAt implements the Claims interface. -func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) { - return c.IssuedAt, nil -} - -// GetAudience implements the Claims interface. -func (c RegisteredClaims) GetAudience() (ClaimStrings, error) { - return c.Audience, nil -} - -// GetIssuer implements the Claims interface. -func (c RegisteredClaims) GetIssuer() (string, error) { - return c.Issuer, nil -} - -// GetSubject implements the Claims interface. -func (c RegisteredClaims) GetSubject() (string, error) { - return c.Subject, nil -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa.go deleted file mode 100644 index 83cbee6..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa.go +++ /dev/null @@ -1,93 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// SigningMethodRSA implements the RSA family of signing methods. 
-// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Verify implements token verification for the SigningMethod -// For this signing method, must be an *rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error { - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType) - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Sign implements token signing for the SigningMethod -// For this signing method, must be an *rsa.PrivateKey structure. 
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType) - } - - // Create the hasher - if !m.Hash.Available() { - return nil, ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return sigBytes, nil - } else { - return nil, err - } -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go deleted file mode 100644 index 28c386e..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go +++ /dev/null @@ -1,135 +0,0 @@ -//go:build go1.4 -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions - // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS. - // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow - // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. - // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. - VerifyOptions *rsa.PSSOptions -} - -// Specific instances for RS/PS and company. 
-var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - SigningMethodRSA: &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - Options: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }, - VerifyOptions: &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Verify implements token verification for the SigningMethod. 
-// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error { - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType) - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - opts := m.Options - if m.VerifyOptions != nil { - opts = m.VerifyOptions - } - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) -} - -// Sign implements token signing for the SigningMethod. -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType) - } - - // Create the hasher - if !m.Hash.Available() { - return nil, ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return sigBytes, nil - } else { - return nil, err - } -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go deleted file mode 100644 index b3aeebb..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go +++ /dev/null @@ -1,107 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") - ErrNotRSAPrivateKey = errors.New("key is not a 
valid RSA private key") - ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") -) - -// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password -// -// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock -// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative -// in the Go standard library for now. See https://github.com/golang/go/issues/8860. 
-func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - - var blockDecrypted []byte - if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { - return nil, err - } - - if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil { - return nil, err - } - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/signing_method.go deleted file mode 100644 index 0d73631..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/signing_method.go +++ /dev/null @@ -1,49 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() 
SigningMethod{} -var signingMethodLock = new(sync.RWMutex) - -// SigningMethod can be used add new methods for signing or verifying tokens. It -// takes a decoded signature as an input in the Verify function and produces a -// signature in Sign. The signature is then usually base64 encoded as part of a -// JWT. -type SigningMethod interface { - Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// RegisterSigningMethod registers the "alg" name and a factory function for signing method. -// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// GetSigningMethod retrieves a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} - -// GetAlgorithms returns a list of registered "alg" names -func GetAlgorithms() (algs []string) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - for alg := range signingMethods { - algs = append(algs, alg) - } - return -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf deleted file mode 100644 index 53745d5..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf +++ /dev/null @@ -1 +0,0 @@ -checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token.go 
b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token.go deleted file mode 100644 index 352873a..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token.go +++ /dev/null @@ -1,100 +0,0 @@ -package jwt - -import ( - "crypto" - "encoding/base64" - "encoding/json" -) - -// Keyfunc will be used by the Parse methods as a callback function to supply -// the key for verification. The function receives the parsed, but unverified -// Token. This allows you to use properties in the Header of the token (such as -// `kid`) to identify which key to use. -// -// The returned interface{} may be a single key or a VerificationKeySet containing -// multiple keys. -type Keyfunc func(*Token) (interface{}, error) - -// VerificationKey represents a public or secret key for verifying a token's signature. -type VerificationKey interface { - crypto.PublicKey | []uint8 -} - -// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token. -type VerificationKeySet struct { - Keys []VerificationKey -} - -// Token represents a JWT Token. Different fields will be used depending on -// whether you're creating or parsing/verifying a token. -type Token struct { - Raw string // Raw contains the raw token. Populated when you [Parse] a token - Method SigningMethod // Method is the signing method used or to be used - Header map[string]interface{} // Header is the first segment of the token in decoded form - Claims Claims // Claims is the second segment of the token in decoded form - Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token - Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token -} - -// New creates a new [Token] with the specified signing method and an empty map -// of claims. Additional options can be specified, but are currently unused. 
-func New(method SigningMethod, opts ...TokenOption) *Token { - return NewWithClaims(method, MapClaims{}, opts...) -} - -// NewWithClaims creates a new [Token] with the specified signing method and -// claims. Additional options can be specified, but are currently unused. -func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// SignedString creates and returns a complete, signed JWT. The token is signed -// using the SigningMethod specified in the token. Please refer to -// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types -// for an overview of the different signing methods and their respective key -// types. -func (t *Token) SignedString(key interface{}) (string, error) { - sstr, err := t.SigningString() - if err != nil { - return "", err - } - - sig, err := t.Method.Sign(sstr, key) - if err != nil { - return "", err - } - - return sstr + "." + t.EncodeSegment(sig), nil -} - -// SigningString generates the signing string. This is the most expensive part -// of the whole deal. Unless you need this for something special, just go -// straight for the SignedString. -func (t *Token) SigningString() (string, error) { - h, err := json.Marshal(t.Header) - if err != nil { - return "", err - } - - c, err := json.Marshal(t.Claims) - if err != nil { - return "", err - } - - return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil -} - -// EncodeSegment encodes a JWT specific base64url encoding with padding -// stripped. In the future, this function might take into account a -// [TokenOption]. Therefore, this function exists as a method of [Token], rather -// than a global function. 
-func (*Token) EncodeSegment(seg []byte) string { - return base64.RawURLEncoding.EncodeToString(seg) -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token_option.go deleted file mode 100644 index b4ae3ba..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/token_option.go +++ /dev/null @@ -1,5 +0,0 @@ -package jwt - -// TokenOption is a reserved type, which provides some forward compatibility, -// if we ever want to introduce token creation-related options. -type TokenOption func(*Token) diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/types.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/types.go deleted file mode 100644 index b2655a9..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/types.go +++ /dev/null @@ -1,149 +0,0 @@ -package jwt - -import ( - "encoding/json" - "fmt" - "math" - "strconv" - "time" -) - -// TimePrecision sets the precision of times and dates within this library. This -// has an influence on the precision of times when comparing expiry or other -// related time fields. Furthermore, it is also the precision of times when -// serializing. -// -// For backwards compatibility the default precision is set to seconds, so that -// no fractional timestamps are generated. -var TimePrecision = time.Second - -// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type, -// especially its MarshalJSON function. -// -// If it is set to true (the default), it will always serialize the type as an -// array of strings, even if it just contains one element, defaulting to the -// behavior of the underlying []string. If it is set to false, it will serialize -// to a single string, if it contains one element. Otherwise, it will serialize -// to an array of strings. 
-var MarshalSingleStringAsArray = true - -// NumericDate represents a JSON numeric date value, as referenced at -// https://datatracker.ietf.org/doc/html/rfc7519#section-2. -type NumericDate struct { - time.Time -} - -// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. -// It will truncate the timestamp according to the precision specified in TimePrecision. -func NewNumericDate(t time.Time) *NumericDate { - return &NumericDate{t.Truncate(TimePrecision)} -} - -// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a -// UNIX epoch with the float fraction representing non-integer seconds. -func newNumericDateFromSeconds(f float64) *NumericDate { - round, frac := math.Modf(f) - return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) -} - -// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch -// represented in NumericDate to a byte array, using the precision specified in TimePrecision. -func (date NumericDate) MarshalJSON() (b []byte, err error) { - var prec int - if TimePrecision < time.Second { - prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) - } - truncatedDate := date.Truncate(TimePrecision) - - // For very large timestamps, UnixNano would overflow an int64, but this - // function requires nanosecond level precision, so we have to use the - // following technique to get round the issue: - // - // 1. Take the normal unix timestamp to form the whole number part of the - // output, - // 2. Take the result of the Nanosecond function, which returns the offset - // within the second of the particular unix time instance, to form the - // decimal part of the output - // 3. 
Concatenate them to produce the final result - seconds := strconv.FormatInt(truncatedDate.Unix(), 10) - nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) - - output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...) - - return output, nil -} - -// UnmarshalJSON is an implementation of the json.RawMessage interface and -// deserializes a [NumericDate] from a JSON representation, i.e. a -// [json.Number]. This number represents an UNIX epoch with either integer or -// non-integer seconds. -func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { - var ( - number json.Number - f float64 - ) - - if err = json.Unmarshal(b, &number); err != nil { - return fmt.Errorf("could not parse NumericData: %w", err) - } - - if f, err = number.Float64(); err != nil { - return fmt.Errorf("could not convert json number value to float: %w", err) - } - - n := newNumericDateFromSeconds(f) - *date = *n - - return nil -} - -// ClaimStrings is basically just a slice of strings, but it can be either -// serialized from a string array or just a string. This type is necessary, -// since the "aud" claim can either be a single string or an array. -type ClaimStrings []string - -func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { - var value interface{} - - if err = json.Unmarshal(data, &value); err != nil { - return err - } - - var aud []string - - switch v := value.(type) { - case string: - aud = append(aud, v) - case []string: - aud = ClaimStrings(v) - case []interface{}: - for _, vv := range v { - vs, ok := vv.(string) - if !ok { - return ErrInvalidType - } - aud = append(aud, vs) - } - case nil: - return nil - default: - return ErrInvalidType - } - - *s = aud - - return -} - -func (s ClaimStrings) MarshalJSON() (b []byte, err error) { - // This handles a special case in the JWT RFC. If the string array, e.g. 
- // used by the "aud" field, only contains one element, it MAY be serialized - // as a single string. This may or may not be desired based on the ecosystem - // of other JWT library used, so we make it configurable by the variable - // MarshalSingleStringAsArray. - if len(s) == 1 && !MarshalSingleStringAsArray { - return json.Marshal(s[0]) - } - - return json.Marshal([]string(s)) -} diff --git a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/validator.go b/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/validator.go deleted file mode 100644 index 008ecd8..0000000 --- a/backend/services/controller/vendor/github.com/golang-jwt/jwt/v5/validator.go +++ /dev/null @@ -1,316 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// ClaimsValidator is an interface that can be implemented by custom claims who -// wish to execute any additional claims validation based on -// application-specific logic. The Validate function is then executed in -// addition to the regular claims validation and any error returned is appended -// to the final validation result. -// -// type MyCustomClaims struct { -// Foo string `json:"foo"` -// jwt.RegisteredClaims -// } -// -// func (m MyCustomClaims) Validate() error { -// if m.Foo != "bar" { -// return errors.New("must be foobar") -// } -// return nil -// } -type ClaimsValidator interface { - Claims - Validate() error -} - -// Validator is the core of the new Validation API. It is automatically used by -// a [Parser] during parsing and can be modified with various parser options. -// -// The [NewValidator] function should be used to create an instance of this -// struct. -type Validator struct { - // leeway is an optional leeway that can be provided to account for clock skew. - leeway time.Duration - - // timeFunc is used to supply the current time that is needed for - // validation. If unspecified, this defaults to time.Now. 
- timeFunc func() time.Time - - // requireExp specifies whether the exp claim is required - requireExp bool - - // verifyIat specifies whether the iat (Issued At) claim will be verified. - // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this - // only specifies the age of the token, but no validation check is - // necessary. However, if wanted, it can be checked if the iat is - // unrealistic, i.e., in the future. - verifyIat bool - - // expectedAud contains the audience this token expects. Supplying an empty - // string will disable aud checking. - expectedAud string - - // expectedIss contains the issuer this token expects. Supplying an empty - // string will disable iss checking. - expectedIss string - - // expectedSub contains the subject this token expects. Supplying an empty - // string will disable sub checking. - expectedSub string -} - -// NewValidator can be used to create a stand-alone validator with the supplied -// options. This validator can then be used to validate already parsed claims. -// -// Note: Under normal circumstances, explicitly creating a validator is not -// needed and can potentially be dangerous; instead functions of the [Parser] -// class should be used. -// -// The [Validator] is only checking the *validity* of the claims, such as its -// expiration time, but it does NOT perform *signature verification* of the -// token. -func NewValidator(opts ...ParserOption) *Validator { - p := NewParser(opts...) - return p.validator -} - -// Validate validates the given claims. It will also perform any custom -// validation if claims implements the [ClaimsValidator] interface. -// -// Note: It will NOT perform any *signature verification* on the token that -// contains the claims and expects that the [Claim] was already successfully -// verified. 
-func (v *Validator) Validate(claims Claims) error { - var ( - now time.Time - errs []error = make([]error, 0, 6) - err error - ) - - // Check, if we have a time func - if v.timeFunc != nil { - now = v.timeFunc() - } else { - now = time.Now() - } - - // We always need to check the expiration time, but usage of the claim - // itself is OPTIONAL by default. requireExp overrides this behavior - // and makes the exp claim mandatory. - if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil { - errs = append(errs, err) - } - - // We always need to check not-before, but usage of the claim itself is - // OPTIONAL. - if err = v.verifyNotBefore(claims, now, false); err != nil { - errs = append(errs, err) - } - - // Check issued-at if the option is enabled - if v.verifyIat { - if err = v.verifyIssuedAt(claims, now, false); err != nil { - errs = append(errs, err) - } - } - - // If we have an expected audience, we also require the audience claim - if v.expectedAud != "" { - if err = v.verifyAudience(claims, v.expectedAud, true); err != nil { - errs = append(errs, err) - } - } - - // If we have an expected issuer, we also require the issuer claim - if v.expectedIss != "" { - if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil { - errs = append(errs, err) - } - } - - // If we have an expected subject, we also require the subject claim - if v.expectedSub != "" { - if err = v.verifySubject(claims, v.expectedSub, true); err != nil { - errs = append(errs, err) - } - } - - // Finally, we want to give the claim itself some possibility to do some - // additional custom validation based on a custom Validate function. - cvt, ok := claims.(ClaimsValidator) - if ok { - if err := cvt.Validate(); err != nil { - errs = append(errs, err) - } - } - - if len(errs) == 0 { - return nil - } - - return joinErrors(errs...) -} - -// verifyExpiresAt compares the exp claim in claims against cmp. This function -// will succeed if cmp < exp. Additional leeway is taken into account. 
-// -// If exp is not set, it will succeed if the claim is not required, -// otherwise ErrTokenRequiredClaimMissing will be returned. -// -// Additionally, if any error occurs while retrieving the claim, e.g., when its -// the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { - exp, err := claims.GetExpirationTime() - if err != nil { - return err - } - - if exp == nil { - return errorIfRequired(required, "exp") - } - - return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired) -} - -// verifyIssuedAt compares the iat claim in claims against cmp. This function -// will succeed if cmp >= iat. Additional leeway is taken into account. -// -// If iat is not set, it will succeed if the claim is not required, -// otherwise ErrTokenRequiredClaimMissing will be returned. -// -// Additionally, if any error occurs while retrieving the claim, e.g., when its -// the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { - iat, err := claims.GetIssuedAt() - if err != nil { - return err - } - - if iat == nil { - return errorIfRequired(required, "iat") - } - - return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued) -} - -// verifyNotBefore compares the nbf claim in claims against cmp. This function -// will return true if cmp >= nbf. Additional leeway is taken into account. -// -// If nbf is not set, it will succeed if the claim is not required, -// otherwise ErrTokenRequiredClaimMissing will be returned. -// -// Additionally, if any error occurs while retrieving the claim, e.g., when its -// the wrong type, an ErrTokenUnverifiable error will be returned. 
-func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { - nbf, err := claims.GetNotBefore() - if err != nil { - return err - } - - if nbf == nil { - return errorIfRequired(required, "nbf") - } - - return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet) -} - -// verifyAudience compares the aud claim against cmp. -// -// If aud is not set or an empty list, it will succeed if the claim is not required, -// otherwise ErrTokenRequiredClaimMissing will be returned. -// -// Additionally, if any error occurs while retrieving the claim, e.g., when its -// the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error { - aud, err := claims.GetAudience() - if err != nil { - return err - } - - if len(aud) == 0 { - return errorIfRequired(required, "aud") - } - - // use a var here to keep constant time compare when looping over a number of claims - result := false - - var stringClaims string - for _, a := range aud { - if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { - result = true - } - stringClaims = stringClaims + a - } - - // case where "" is sent in one or many aud claims - if stringClaims == "" { - return errorIfRequired(required, "aud") - } - - return errorIfFalse(result, ErrTokenInvalidAudience) -} - -// verifyIssuer compares the iss claim in claims against cmp. -// -// If iss is not set, it will succeed if the claim is not required, -// otherwise ErrTokenRequiredClaimMissing will be returned. -// -// Additionally, if any error occurs while retrieving the claim, e.g., when its -// the wrong type, an ErrTokenUnverifiable error will be returned. 
-func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error { - iss, err := claims.GetIssuer() - if err != nil { - return err - } - - if iss == "" { - return errorIfRequired(required, "iss") - } - - return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer) -} - -// verifySubject compares the sub claim against cmp. -// -// If sub is not set, it will succeed if the claim is not required, -// otherwise ErrTokenRequiredClaimMissing will be returned. -// -// Additionally, if any error occurs while retrieving the claim, e.g., when its -// the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error { - sub, err := claims.GetSubject() - if err != nil { - return err - } - - if sub == "" { - return errorIfRequired(required, "sub") - } - - return errorIfFalse(sub == cmp, ErrTokenInvalidSubject) -} - -// errorIfFalse returns the error specified in err, if the value is true. -// Otherwise, nil is returned. -func errorIfFalse(value bool, err error) error { - if value { - return nil - } else { - return err - } -} - -// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is -// true. Otherwise, nil is returned. -func errorIfRequired(required bool, claim string) error { - if required { - return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing) - } else { - return nil - } -} diff --git a/backend/services/controller/vendor/github.com/golang/snappy/.gitignore b/backend/services/controller/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. 
-testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/backend/services/controller/vendor/github.com/golang/snappy/AUTHORS b/backend/services/controller/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa195..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/backend/services/controller/vendor/github.com/golang/snappy/CONTRIBUTORS b/backend/services/controller/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae31..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). 
-# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/backend/services/controller/vendor/github.com/golang/snappy/LICENSE b/backend/services/controller/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/services/controller/vendor/github.com/golang/snappy/README b/backend/services/controller/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea1287..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." 
- -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. -tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s 
gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/backend/services/controller/vendor/github.com/golang/snappy/decode.go b/backend/services/controller/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 72efb03..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. 
-func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. 
-func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). 
- if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.go b/backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. 
-// -//go:noescape -func decode(dst, src []byte) int diff --git a/backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.s b/backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f6..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. 
- MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. 
- // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. 
We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! 
As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". 
- // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/backend/services/controller/vendor/github.com/golang/snappy/decode_other.go b/backend/services/controller/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 8c9f204..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. 
Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/backend/services/controller/vendor/github.com/golang/snappy/encode.go b/backend/services/controller/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 8d393e9..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. 
That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. 
-func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. 
Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. 
- n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. 
- w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.go b/backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.go deleted file mode 100644 index 150d91b..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. 
-// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.s b/backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. 
-// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. 
- MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. 
The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. 
-TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! 
offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. 
- MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". 
- ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/backend/services/controller/vendor/github.com/golang/snappy/encode_other.go b/backend/services/controller/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index dbcae90..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. 
-// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. 
-// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. 
- s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. 
We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/backend/services/controller/vendor/github.com/golang/snappy/snappy.go b/backend/services/controller/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692e..0000000 --- a/backend/services/controller/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. 
-// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. 
- - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/.travis.yml b/backend/services/controller/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a6..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTING.md b/backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. 
diff --git a/backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTORS b/backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/backend/services/controller/vendor/github.com/google/uuid/LICENSE b/backend/services/controller/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc6826..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/services/controller/vendor/github.com/google/uuid/README.md b/backend/services/controller/vendor/github.com/google/uuid/README.md deleted file mode 100644 index f765a46..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). 
- -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://pkg.go.dev/github.com/google/uuid diff --git a/backend/services/controller/vendor/github.com/google/uuid/dce.go b/backend/services/controller/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. 
-// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/doc.go b/backend/services/controller/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/backend/services/controller/vendor/github.com/google/uuid/hash.go b/backend/services/controller/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b404f4b..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) //nolint:errcheck - h.Write(data) //nolint:errcheck - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. 
It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/marshal.go b/backend/services/controller/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 14bd340..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/node.go b/backend/services/controller/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. 
-func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/node_js.go b/backend/services/controller/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78ed..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/backend/services/controller/vendor/github.com/google/uuid/node_net.go b/backend/services/controller/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcdd..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. 
-// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/null.go b/backend/services/controller/vendor/github.com/google/uuid/null.go deleted file mode 100644 index d7fcbf2..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/null.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2021 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "fmt" -) - -var jsonNull = []byte("null") - -// NullUUID represents a UUID that may be null. -// NullUUID implements the SQL driver.Scanner interface so -// it can be used as a scan destination: -// -// var u uuid.NullUUID -// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) -// ... -// if u.Valid { -// // use u.UUID -// } else { -// // NULL value -// } -// -type NullUUID struct { - UUID UUID - Valid bool // Valid is true if UUID is not NULL -} - -// Scan implements the SQL driver.Scanner interface. -func (nu *NullUUID) Scan(value interface{}) error { - if value == nil { - nu.UUID, nu.Valid = Nil, false - return nil - } - - err := nu.UUID.Scan(value) - if err != nil { - nu.Valid = false - return err - } - - nu.Valid = true - return nil -} - -// Value implements the driver Valuer interface. 
-func (nu NullUUID) Value() (driver.Value, error) { - if !nu.Valid { - return nil, nil - } - // Delegate to UUID Value function - return nu.UUID.Value() -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (nu NullUUID) MarshalBinary() ([]byte, error) { - if nu.Valid { - return nu.UUID[:], nil - } - - return []byte(nil), nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (nu *NullUUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(nu.UUID[:], data) - nu.Valid = true - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (nu NullUUID) MarshalText() ([]byte, error) { - if nu.Valid { - return nu.UUID.MarshalText() - } - - return jsonNull, nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (nu *NullUUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - nu.Valid = false - return err - } - nu.UUID = id - nu.Valid = true - return nil -} - -// MarshalJSON implements json.Marshaler. -func (nu NullUUID) MarshalJSON() ([]byte, error) { - if nu.Valid { - return json.Marshal(nu.UUID) - } - - return jsonNull, nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (nu *NullUUID) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, jsonNull) { - *nu = NullUUID{} - return nil // valid null UUID - } - err := json.Unmarshal(data, &nu.UUID) - nu.Valid = err == nil - return err -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/sql.go b/backend/services/controller/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index 2e02ec0..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently. -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/time.go b/backend/services/controller/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06c..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. 
-type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. 
(section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/util.go b/backend/services/controller/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c73..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. 
-func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. 
-func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/uuid.go b/backend/services/controller/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index a57207a..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - "sync" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -const randPoolSize = 16 * 16 - -var ( - rander = rand.Reader // random function - poolEnabled = false - poolMu sync.Mutex - poolPos = randPoolSize // protected with poolMu - pool [randPoolSize]byte // protected with poolMu -) - -type invalidLengthError struct{ len int } - -func (err invalidLengthError) Error() string { - return fmt.Sprintf("invalid UUID length: %d", err.len) -} - -// IsInvalidLengthError is matcher function for custom error invalidLengthError -func IsInvalidLengthError(err error) bool { - _, ok := err.(invalidLengthError) - return ok -} - -// Parse decodes s into a UUID or returns an error. 
Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(s)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
-func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(b)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. 
-func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} - -// EnableRandPool enables internal randomness pool used for Random -// (Version 4) UUID generation. 
The pool contains random bytes read from -// the random number generator on demand in batches. Enabling the pool -// may improve the UUID generation throughput significantly. -// -// Since the pool is stored on the Go heap, this feature may be a bad fit -// for security sensitive applications. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func EnableRandPool() { - poolEnabled = true -} - -// DisableRandPool disables the randomness pool if it was previously -// enabled with EnableRandPool. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func DisableRandPool() { - poolEnabled = false - defer poolMu.Unlock() - poolMu.Lock() - poolPos = randPoolSize -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/version1.go b/backend/services/controller/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 4631096..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. 
-// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/backend/services/controller/vendor/github.com/google/uuid/version4.go b/backend/services/controller/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 7697802..0000000 --- a/backend/services/controller/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewString creates a new random UUID and returns it as a string or panics. -// NewString is equivalent to the expression -// -// uuid.New().String() -func NewString() string { - return Must(NewRandom()).String() -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// Uses the randomness pool if it was enabled with EnableRandPool. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - if !poolEnabled { - return NewRandomFromReader(rander) - } - return newRandomFromPool() -} - -// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. -func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} - -func newRandomFromPool() (UUID, error) { - var uuid UUID - poolMu.Lock() - if poolPos == randPoolSize { - _, err := io.ReadFull(rander, pool[:]) - if err != nil { - poolMu.Unlock() - return Nil, err - } - poolPos = 0 - } - copy(uuid[:], pool[poolPos:(poolPos+16)]) - poolPos += 16 - poolMu.Unlock() - - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/AUTHORS b/backend/services/controller/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. 
- -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/LICENSE b/backend/services/controller/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 6903df6..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/README.md b/backend/services/controller/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index 35eea9f..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,805 +0,0 @@ -# gorilla/mux - -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) - -![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) - -https://www.gorillatoolkit.org/pkg/mux - -Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to -their respective handler. - -The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. -* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. -* URL hosts, paths and query values can have variables with an optional regular expression. -* Registered URLs can be built, or "reversed", which helps maintaining references to resources. -* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. 
- ---- - -* [Install](#install) -* [Examples](#examples) -* [Matching Routes](#matching-routes) -* [Static Files](#static-files) -* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) -* [Registered URLs](#registered-urls) -* [Walking Routes](#walking-routes) -* [Graceful Shutdown](#graceful-shutdown) -* [Middleware](#middleware) -* [Handling CORS Requests](#handling-cors-requests) -* [Testing Handlers](#testing-handlers) -* [Full Example](#full-example) - ---- - -## Install - -With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: - -```sh -go get -u github.com/gorilla/mux -``` - -## Examples - -Let's start registering a couple of URL paths and handlers: - -```go -func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) -} -``` - -Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. - -Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/products/{key}", ProductHandler) -r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: - -```go -func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, "Category: %v\n", vars["category"]) -} -``` - -And this is all you need to know about the basic usage. 
More advanced options are explained below. - -### Matching Routes - -Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: - -```go -r := mux.NewRouter() -// Only matches if domain is "www.example.com". -r.Host("www.example.com") -// Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.example.com") -``` - -There are several other matchers that can be added. To match path prefixes: - -```go -r.PathPrefix("/products/") -``` - -...or HTTP methods: - -```go -r.Methods("GET", "POST") -``` - -...or URL schemes: - -```go -r.Schemes("https") -``` - -...or header values: - -```go -r.Headers("X-Requested-With", "XMLHttpRequest") -``` - -...or query values: - -```go -r.Queries("key", "value") -``` - -...or to use a custom matcher function: - -```go -r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 -}) -``` - -...and finally, it is possible to combine several matchers in a single route: - -```go -r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") -``` - -Routes are tested in the order they were added to the router. If two routes match, the first one wins: - -```go -r := mux.NewRouter() -r.HandleFunc("/specific", specificHandler) -r.PathPrefix("/").Handler(catchAllHandler) -``` - -Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". - -For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: - -```go -r := mux.NewRouter() -s := r.Host("www.example.com").Subrouter() -``` - -Then register routes in the subrouter: - -```go -s.HandleFunc("/products/", ProductsHandler) -s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: - -```go -r := mux.NewRouter() -s := r.PathPrefix("/products").Subrouter() -// "/products/" -s.HandleFunc("/", ProductsHandler) -// "/products/{key}/" -s.HandleFunc("/{key}/", ProductHandler) -// "/products/{key}/details" -s.HandleFunc("/{key}/details", ProductDetailsHandler) -``` - - -### Static Files - -Note that the path provided to `PathPrefix()` represents a "wildcard": calling -`PathPrefix("/static/").Handler(...)` means that the handler will be passed any -request that matches "/static/\*". This makes it easy to serve static files with mux: - -```go -func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! 
- WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Serving Single Page Applications - -Most of the time it makes sense to serve your SPA on a separate web server from your API, -but sometimes it's desirable to serve them both from one place. It's possible to write a simple -handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage -mux's powerful routing for your API endpoints. - -```go -package main - -import ( - "encoding/json" - "log" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/gorilla/mux" -) - -// spaHandler implements the http.Handler interface, so we can use it -// to respond to HTTP requests. The path to the static directory and -// path to the index file within that static directory are used to -// serve the SPA in the given static directory. -type spaHandler struct { - staticPath string - indexPath string -} - -// ServeHTTP inspects the URL path to locate a file within the static dir -// on the SPA handler. If a file is found, it will be served. If not, the -// file located at the index path on the SPA handler will be served. This -// is suitable behavior for serving an SPA (single page application). 
-func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // get the absolute path to prevent directory traversal - path, err := filepath.Abs(r.URL.Path) - if err != nil { - // if we failed to get the absolute path respond with a 400 bad request - // and stop - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // prepend the path with the path to the static directory - path = filepath.Join(h.staticPath, path) - - // check whether a file exists at the given path - _, err = os.Stat(path) - if os.IsNotExist(err) { - // file does not exist, serve index.html - http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) - return - } else if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // otherwise, use http.FileServer to serve the static dir - http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) -} - -func main() { - router := mux.NewRouter() - - router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { - // an example API handler - json.NewEncoder(w).Encode(map[string]bool{"ok": true}) - }) - - spa := spaHandler{staticPath: "build", indexPath: "index.html"} - router.PathPrefix("/").Handler(spa) - - srv := &http.Server{ - Handler: router, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Registered URLs - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). 
- Name("article") -``` - -To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - -```go -url, err := r.Get("article").URL("category", "technology", "id", "42") -``` - -...and the result will be a `url.URL` with the following path: - -``` -"/articles/technology/42" -``` - -This also works for host and query value variables: - -```go -r := mux.NewRouter() -r.Host("{subdomain}.example.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - -// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") -``` - -All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - -```go -r.HeadersRegexp("Content-Type", "application/(text|json)") -``` - -...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - -There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: - -```go -// "http://news.example.com/" -host, err := r.Get("article").URLHost("subdomain", "news") - -// "/articles/technology/42" -path, err := r.Get("article").URLPath("category", "technology", "id", "42") -``` - -And if you use subrouters, host and path defined separately can be built as well: - -```go -r := mux.NewRouter() -s := r.Host("{subdomain}.example.com").Subrouter() -s.Path("/articles/{category}/{id:[0-9]+}"). 
- HandlerFunc(ArticleHandler). - Name("article") - -// "http://news.example.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -### Walking Routes - -The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, -the following prints all of the registered routes: - -```go -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func handler(w http.ResponseWriter, r *http.Request) { - return -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.HandleFunc("/products", handler).Methods("POST") - r.HandleFunc("/articles", handler).Methods("GET") - r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") - r.HandleFunc("/authors", handler).Queries("surname", "{surname}") - err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - pathTemplate, err := route.GetPathTemplate() - if err == nil { - fmt.Println("ROUTE:", pathTemplate) - } - pathRegexp, err := route.GetPathRegexp() - if err == nil { - fmt.Println("Path regexp:", pathRegexp) - } - queriesTemplates, err := route.GetQueriesTemplates() - if err == nil { - fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) - } - queriesRegexps, err := route.GetQueriesRegexp() - if err == nil { - fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) - } - methods, err := route.GetMethods() - if err == nil { - fmt.Println("Methods:", strings.Join(methods, ",")) - } - fmt.Println() - return nil - }) - - if err != nil { - fmt.Println(err) - } - - http.Handle("/", r) -} -``` - -### Graceful Shutdown - -Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. 
Here's how to do that alongside `mux`: - -```go -package main - -import ( - "context" - "flag" - "log" - "net/http" - "os" - "os/signal" - "time" - - "github.com/gorilla/mux" -) - -func main() { - var wait time.Duration - flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m") - flag.Parse() - - r := mux.NewRouter() - // Add your routes as needed - - srv := &http.Server{ - Addr: "0.0.0.0:8080", - // Good practice to set timeouts to avoid Slowloris attacks. - WriteTimeout: time.Second * 15, - ReadTimeout: time.Second * 15, - IdleTimeout: time.Second * 60, - Handler: r, // Pass our instance of gorilla/mux in. - } - - // Run our server in a goroutine so that it doesn't block. - go func() { - if err := srv.ListenAndServe(); err != nil { - log.Println(err) - } - }() - - c := make(chan os.Signal, 1) - // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) - // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. - signal.Notify(c, os.Interrupt) - - // Block until we receive our signal. - <-c - - // Create a deadline to wait for. - ctx, cancel := context.WithTimeout(context.Background(), wait) - defer cancel() - // Doesn't block if no connections, but will otherwise wait - // until the timeout deadline. - srv.Shutdown(ctx) - // Optionally, you could run srv.Shutdown in a goroutine and block on - // <-ctx.Done() if your application should wait for other services - // to finalize based on context cancellation. - log.Println("shutting down") - os.Exit(0) -} -``` - -### Middleware - -Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. -Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. 
Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. - -Mux middlewares are defined using the de facto standard type: - -```go -type MiddlewareFunc func(http.Handler) http.Handler -``` - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers. - -A very basic middleware which logs the URI of the request being handled could be written as: - -```go -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. - next.ServeHTTP(w, r) - }) -} -``` - -Middlewares can be added to a router using `Router.Use()`: - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) -r.Use(loggingMiddleware) -``` - -A more complex authentication middleware, which maps session token to users, could be written as: - -```go -// Define our struct -type authenticationMiddleware struct { - tokenUsers map[string]string -} - -// Initialize it somewhere -func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" -} - -// Middleware function, which will be called for each request -func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated 
user %s\n", user) - // Pass down the request to the next middleware (or final handler) - next.ServeHTTP(w, r) - } else { - // Write an error and stop the handler chain - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) -} -``` - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) - -amw := authenticationMiddleware{} -amw.Populate() - -r.Use(amw.Middleware) -``` - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. - -### Handling CORS Requests - -[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. - -* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` -* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route -* If you do not specify any methods, then: -> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. 
- -Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: - -```go -package main - -import ( - "net/http" - "github.com/gorilla/mux" -) - -func main() { - r := mux.NewRouter() - - // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers - r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) - r.Use(mux.CORSMethodMiddleware(r)) - - http.ListenAndServe(":8080", r) -} - -func fooHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - if r.Method == http.MethodOptions { - return - } - - w.Write([]byte("foo")) -} -``` - -And an request to `/foo` using something like: - -```bash -curl localhost:8080/foo -v -``` - -Would look like: - -```bash -* Trying ::1... -* TCP_NODELAY set -* Connected to localhost (::1) port 8080 (#0) -> GET /foo HTTP/1.1 -> Host: localhost:8080 -> User-Agent: curl/7.59.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS -< Access-Control-Allow-Origin: * -< Date: Fri, 28 Jun 2019 20:13:30 GMT -< Content-Length: 3 -< Content-Type: text/plain; charset=utf-8 -< -* Connection #0 to host localhost left intact -foo -``` - -### Testing Handlers - -Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. - -First, our simple HTTP handler: - -```go -// endpoints.go -package main - -func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { - // A very simple health check. - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - // In the future we could report back on the status of our DB, or our cache - // (e.g. Redis) by performing a simple PING, and include them in the response. 
- io.WriteString(w, `{"alive": true}`) -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/health", HealthCheckHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test code: - -```go -// endpoints_test.go -package main - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestHealthCheckHandler(t *testing.T) { - // Create a request to pass to our handler. We don't have any query parameters for now, so we'll - // pass 'nil' as the third parameter. - req, err := http.NewRequest("GET", "/health", nil) - if err != nil { - t.Fatal(err) - } - - // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. - rr := httptest.NewRecorder() - handler := http.HandlerFunc(HealthCheckHandler) - - // Our handlers satisfy http.Handler, so we can call their ServeHTTP method - // directly and pass in our Request and ResponseRecorder. - handler.ServeHTTP(rr, req) - - // Check the status code is what we expect. - if status := rr.Code; status != http.StatusOK { - t.Errorf("handler returned wrong status code: got %v want %v", - status, http.StatusOK) - } - - // Check the response body is what we expect. - expected := `{"alive": true}` - if rr.Body.String() != expected { - t.Errorf("handler returned unexpected body: got %v want %v", - rr.Body.String(), expected) - } -} -``` - -In the case that our routes have [variables](#examples), we can pass those in the request. We could write -[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple -possible route variables as needed. 
- -```go -// endpoints.go -func main() { - r := mux.NewRouter() - // A route with a route variable: - r.HandleFunc("/metrics/{type}", MetricsHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test file, with a table-driven test of `routeVariables`: - -```go -// endpoints_test.go -func TestMetricsHandler(t *testing.T) { - tt := []struct{ - routeVariable string - shouldPass bool - }{ - {"goroutines", true}, - {"heap", true}, - {"counters", true}, - {"queries", true}, - {"adhadaeqm3k", false}, - } - - for _, tc := range tt { - path := fmt.Sprintf("/metrics/%s", tc.routeVariable) - req, err := http.NewRequest("GET", path, nil) - if err != nil { - t.Fatal(err) - } - - rr := httptest.NewRecorder() - - // Need to create a router that we can pass the request through so that the vars will be added to the context - router := mux.NewRouter() - router.HandleFunc("/metrics/{type}", MetricsHandler) - router.ServeHTTP(rr, req) - - // In this case, our MetricsHandler returns a non-200 response - // for a route variable it doesn't know about. - if rr.Code == http.StatusOK && !tc.shouldPass { - t.Errorf("handler should have failed on routeVariable %s: got %v want %v", - tc.routeVariable, rr.Code, http.StatusOK) - } - } -} -``` - -## Full Example - -Here's a complete, runnable example of a small `mux` based server: - -```go -package main - -import ( - "net/http" - "log" - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - log.Fatal(http.ListenAndServe(":8000", r)) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. 
diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/doc.go b/backend/services/controller/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index bd5a38b..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. 
This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: - - r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -Note that if any capturing groups are present, mux will panic() during parsing. To prevent -this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to -"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably -when capturing groups were present. - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. 
To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. 
When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Note that the path provided to PathPrefix() represents a "wildcard": calling -PathPrefix("/static/").Handler(...) means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: - - func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) - } - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host and query value variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). 
- Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. 
Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. - - type MiddlewareFunc func(http.Handler) http.Handler - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). - -A very basic middleware which logs the URI of the request being handled could be written as: - - func simpleMw(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. - next.ServeHTTP(w, r) - }) - } - -Middlewares can be added to a router using `Router.Use()`: - - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.Use(simpleMw) - -A more complex authentication middleware, which maps session token to users, could be written as: - - // Define our struct - type authenticationMiddleware struct { - tokenUsers map[string]string - } - - // Initialize it somewhere - func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" - } - - // Middleware function, which will be called for each request - func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) - } - - r := mux.NewRouter() - r.HandleFunc("/", handler) - - amw := 
authenticationMiddleware{tokenUsers: make(map[string]string)} - amw.Populate() - - r.Use(amw.Middleware) - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. - -*/ -package mux diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/middleware.go b/backend/services/controller/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index cb51c56..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,74 +0,0 @@ -package mux - -import ( - "net/http" - "strings" -) - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. -// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// Middleware allows MiddlewareFunc to implement the middleware interface. -func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { - return mw(handler) -} - -// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) Use(mwf ...MiddlewareFunc) { - for _, fn := range mwf { - r.middlewares = append(r.middlewares, fn) - } -} - -// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. 
-func (r *Router) useInterface(mw middleware) { - r.middlewares = append(r.middlewares, mw) -} - -// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header -// on requests for routes that have an OPTIONS method matcher to all the method matchers on -// the route. Routes that do not explicitly handle OPTIONS requests will not be processed -// by the middleware. See examples for usage. -func CORSMethodMiddleware(r *Router) MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - allMethods, err := getAllMethodsForRoute(r, req) - if err == nil { - for _, v := range allMethods { - if v == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) - } - } - } - - next.ServeHTTP(w, req) - }) - } -} - -// getAllMethodsForRoute returns all the methods from method matchers matching a given -// request. -func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { - var allMethods []string - - for _, route := range r.routes { - var match RouteMatch - if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { - methods, err := route.GetMethods() - if err != nil { - return nil, err - } - - allMethods = append(allMethods, methods...) - } - } - - return allMethods, nil -} diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/mux.go b/backend/services/controller/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index 782a34b..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "context" - "errors" - "fmt" - "net/http" - "path" - "regexp" -) - -var ( - // ErrMethodMismatch is returned when the method in the request does not match - // the method defined against the route. - ErrMethodMismatch = errors.New("method is not allowed") - // ErrNotFound is returned when no route match is found. - ErrNotFound = errors.New("no matching route was found") -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - - // Configurable Handler to be used when the request method does not match the route. - MethodNotAllowedHandler http.Handler - - // Routes to be matched, in order. - routes []*Route - - // Routes by name for URL building. - namedRoutes map[string]*Route - - // If true, do not clear the request context after handling the request. - // - // Deprecated: No effect, since the context is stored on the request itself. - KeepContext bool - - // Slice of middlewares to be called after a match is found - middlewares []middleware - - // configuration shared with `Route` - routeConf -} - -// common route configuration shared between `Router` and `Route` -type routeConf struct { - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. 
- strictSlash bool - - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - - // Manager for the variables from host and path. - regexp routeRegexpGroup - - // List of matchers. - matchers []matcher - - // The scheme used when building URLs. - buildScheme string - - buildVarsFunc BuildVarsFunc -} - -// returns an effective deep copy of `routeConf` -func copyRouteConf(r routeConf) routeConf { - c := r - - if r.regexp.path != nil { - c.regexp.path = copyRouteRegexp(r.regexp.path) - } - - if r.regexp.host != nil { - c.regexp.host = copyRouteRegexp(r.regexp.host) - } - - c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) - for _, q := range r.regexp.queries { - c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) - } - - c.matchers = make([]matcher, len(r.matchers)) - copy(c.matchers, r.matchers) - - return c -} - -func copyRouteRegexp(r *routeRegexp) *routeRegexp { - c := *r - return &c -} - -// Match attempts to match the given request against the router's registered routes. -// -// If the request matches a route of this router or one of its subrouters the Route, -// Handler, and Vars fields of the the match argument are filled and this function -// returns true. -// -// If the request does not match any of this router's or its subrouters' routes -// then this function returns false. If available, a reason for the match failure -// will be filled in the match argument's MatchErr field. If the match failure type -// (eg: not found) has a registered handler, the handler is assigned to the Handler -// field of the match argument. 
-func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - // Build middleware chain if no error was found - if match.MatchErr == nil { - for i := len(r.middlewares) - 1; i >= 0; i-- { - match.Handler = r.middlewares[i].Middleware(match.Handler) - } - } - return true - } - } - - if match.MatchErr == ErrMethodMismatch { - if r.MethodNotAllowedHandler != nil { - match.Handler = r.MethodNotAllowedHandler - return true - } - - return false - } - - // Closest match for a router (includes sub-routers) - if r.NotFoundHandler != nil { - match.Handler = r.NotFoundHandler - match.MatchErr = ErrNotFound - return true - } - - match.MatchErr = ErrNotFound - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if !r.skipClean { - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Clean path to canonical form and redirect. - if p := cleanPath(path); p != path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - req = requestWithVars(req, match.Vars) - req = requestWithRoute(req, match.Route) - } - - if handler == nil && match.MatchErr == ErrMethodMismatch { - handler = methodNotAllowedHandler() - } - - if handler == nil { - handler = http.NotFoundHandler() - } - - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.namedRoutes[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.namedRoutes[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will perform a redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for -// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed -// request will be made as a GET by most clients. Use middleware or client settings -// to modify this behaviour as needed. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. 
-func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// SkipClean defines the path cleaning behaviour for new routes. The initial -// value is false. Users should be careful about which routes are not cleaned -// -// When true, if the route path is "/path//to", it will remain with the double -// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ -// -// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will -// become /fetch/http/xkcd.com/534 -func (r *Router) SkipClean(value bool) *Router { - r.skipClean = value - return r -} - -// UseEncodedPath tells the router to match the encoded original path -// to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". -// -// If not called, the router will match the unencoded path to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" -func (r *Router) UseEncodedPath() *Router { - r.useEncodedPath = true - return r -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - // initialize a route with a copy of the parent router's configuration - route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.routes = append(r.routes, route) - return route -} - -// Name registers a new route with a name. -// See Route.Name(). -func (r *Router) Name(name string) *Route { - return r.NewRoute().Name(name) -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). 
-func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVarsFunc registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. 
The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. -type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - if err != nil { - return err - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string - - // MatchErr is set to appropriate matching error - // It is set to ErrMethodMismatch if there is a mismatch in - // the request method and route method - MatchErr error -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. 
-func Vars(r *http.Request) map[string]string { - if rv := r.Context().Value(varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns. -func CurrentRoute(r *http.Request) *Route { - if rv := r.Context().Value(routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func requestWithVars(r *http.Request, vars map[string]string) *http.Request { - ctx := context.WithValue(r.Context(), varsKey, vars) - return r.WithContext(ctx) -} - -func requestWithRoute(r *http.Request, route *Route) *http.Request { - ctx := context.WithValue(r.Context(), routeKey, route) - return r.WithContext(ctx) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. 
-func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// mapFromPairsToRegex converts variadic string parameters to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. 
- valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// methodNotAllowed replies to the request with an HTTP status code 405. -func methodNotAllowed(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusMethodNotAllowed) -} - -// methodNotAllowedHandler returns a simple request handler -// that replies to each request with a status code 405. -func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/regexp.go b/backend/services/controller/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 0144842..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -type routeRegexpOptions struct { - strictSlash bool - useEncodedPath bool -} - -type regexpType int - -const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if typ == regexpTypeQuery { - defaultPattern = ".*" - } else if typ == regexpTypeHost { - defaultPattern = "[^.]+" - } - // Only match strict slash if not matching - if typ != regexpTypePath { - options.strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if options.strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. 
- raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) - - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if options.strictSlash { - pattern.WriteString("[/]?") - } - if typ == regexpTypeQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if typ != regexpTypePrefix { - pattern.WriteByte('$') - } - - var wildcardHostPort bool - if typ == regexpTypeHost { - if !strings.Contains(pattern.String(), ":") { - wildcardHostPort = true - } - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + - "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") - } - - // Done! 
- return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - wildcardHostPort: wildcardHostPort, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // The type of match - regexpType regexpType - // Options for matching - options routeRegexpOptions - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp - // Wildcard host-port (no strict port match in hostname) - wildcardHostPort bool -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType == regexpTypeHost { - host := getHost(req) - if r.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - return r.regexp.MatchString(host) - } - - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - if r.regexpType == regexpTypeQuery { - value = url.QueryEscape(value) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. 
This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getURLQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getURLQuery(req *http.Request) string { - if r.regexpType != regexpTypeQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) - if ok { - return templateKey + "=" + val - } - return "" -} - -// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. -// If key was not found, empty string and false is returned. -func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { - query := []byte(rawQuery) - for len(query) > 0 { - foundKey := query - if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { - foundKey, query = foundKey[:i], foundKey[i+1:] - } else { - query = query[:0] - } - if len(foundKey) == 0 { - continue - } - var value []byte - if i := bytes.IndexByte(foundKey, '='); i >= 0 { - foundKey, value = foundKey[:i], foundKey[i+1:] - } - if len(foundKey) < len(key) { - // Cannot possibly be key. - continue - } - keyString, err := url.QueryUnescape(string(foundKey)) - if err != nil { - continue - } - if keyString != key { - continue - } - valueString, err := url.QueryUnescape(string(value)) - if err != nil { - continue - } - return valueString, true - } - return "", false -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getURLQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. 
-func braceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - host := getHost(req) - if v.host.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - matches := v.host.regexp.FindStringSubmatchIndex(host) - if len(matches) > 0 { - extractVars(host, matches, v.host.varsN, m.Vars) - } - } - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Store path variables. - if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(path) - if len(matches) > 0 { - extractVars(path, matches, v.path.varsN, m.Vars) - // Check if we should redirect. 
- if v.path.options.strictSlash { - p1 := strings.HasSuffix(path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryURL := q.getURLQuery(req) - matches := q.regexp.FindStringSubmatchIndex(queryURL) - if len(matches) > 0 { - extractVars(queryURL, matches, q.varsN, m.Vars) - } - } -} - -// getHost tries its best to return the request host. -// According to section 14.23 of RFC 2616 the Host header -// can include the port number if the default value of 80 is not used. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - return r.Host -} - -func extractVars(input string, matches []int, names []string, output map[string]string) { - for i, name := range names { - output[name] = input[matches[2*i+2]:matches[2*i+3]] - } -} diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/route.go b/backend/services/controller/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index 750afe5..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Request handler for the route. - handler http.Handler - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. 
- err error - - // "global" reference to all named routes - namedRoutes map[string]*Route - - // config possibly passed in from `Router` - routeConf -} - -// SkipClean reports whether path cleaning is enabled for this route via -// Router.SkipClean. -func (r *Route) SkipClean() bool { - return r.skipClean -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - - var matchErr error - - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - if _, ok := m.(methodMatcher); ok { - matchErr = ErrMethodMismatch - continue - } - - // Ignore ErrNotFound errors. These errors arise from match call - // to Subrouters. - // - // This prevents subsequent matching subrouters from failing to - // run middleware. If not ignored, the middleware would see a - // non-nil MatchErr and be skipped, even when there was a - // matching route. - if match.MatchErr == ErrNotFound { - match.MatchErr = nil - } - - matchErr = nil - return false - } - } - - if matchErr != nil { - match.MatchErr = matchErr - return false - } - - if match.MatchErr == ErrMethodMismatch && r.handler != nil { - // We found a route which matches request method, clear MatchErr - match.MatchErr = nil - // Then override the mis-matched handler - match.Handler = r.handler - } - - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - - // Set variables. - r.regexp.setMatch(req, match, r) - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. 
-func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// It is an error to call Name more than once on a route. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.namedRoutes[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. 
-func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { - if r.err != nil { - return r.err - } - if typ == regexpTypePath || typ == regexpTypePrefix { - if len(tpl) > 0 && tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ - strictSlash: r.strictSlash, - useEncodedPath: r.useEncodedPath, - }) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if typ == regexpTypeHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if typ == regexpTypeQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) 
- return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// headerRegexMatcher matches the request against the route given a regex for the header -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex -// support. For example: -// -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both the request header matches both regular expressions. -// If the value is an empty string, it will match any value if the key is set. -// Use the start and end of string anchors (^ and $) to match an exact value. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) - return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypeHost) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. 
-type MatcherFunc func(*http.Request, *RouteMatch) bool - -// Match returns the match for a given request. -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePath) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. 
This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - scheme := r.URL.Scheme - // https://golang.org/pkg/net/http/#Request - // "For [most] server requests, fields other than Path and RawQuery will be - // empty." 
- // Since we're an http muxer, the scheme is either going to be http or https - // though, so we can just set it based on the tls termination state. - if scheme == "" { - if r.TLS == nil { - scheme = "http" - } else { - scheme = "https" - } - } - return matchInArray(m, scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -// If the request's URL has a scheme set, it will be matched against. -// Generally, the URL scheme will only be set if a previous handler set it, -// such as the ProxyHeaders handler from gorilla/handlers. -// If unset, the scheme will be determined based on the request's TLS -// termination state. -// The first argument to Schemes will be used when constructing a route URL. -func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - if len(schemes) > 0 { - r.buildScheme = schemes[0] - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. -func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - if r.buildVarsFunc != nil { - // compose the old and new functions - old := r.buildVarsFunc - r.buildVarsFunc = func(m map[string]string) map[string]string { - return f(old(m)) - } - } else { - r.buildVarsFunc = f - } - return r -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. 
For example: -// -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - // initialize a subrouter with a copy of the parent route's configuration - router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). 
-// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// The scheme of the resulting url will be the first argument that was passed to Schemes: -// -// // url.String() will be "https://example.com" -// r := mux.NewRouter() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - var scheme, host, path string - queries := make([]string, 0, len(r.regexp.queries)) - if r.regexp.host != nil { - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - scheme = "http" - if r.buildScheme != "" { - scheme = r.buildScheme - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - for _, q := range r.regexp.queries { - var query string - if query, err = q.url(values); err != nil { - return nil, err - } - queries = append(queries, query) - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - RawQuery: strings.Join(queries, "&"), - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) 
- if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - u := &url.URL{ - Scheme: "http", - Host: host, - } - if r.buildScheme != "" { - u.Scheme = r.buildScheme - } - return u, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// GetPathTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route doesn't have a path") - } - return r.regexp.path.template, nil -} - -// GetPathRegexp returns the expanded regular expression used to match route path. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathRegexp() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route does not have a path") - } - return r.regexp.path.regexp.String(), nil -} - -// GetQueriesRegexp returns the expanded regular expressions used to match the -// route queries. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. 
-// An error will be returned if the route does not have queries. -func (r *Route) GetQueriesRegexp() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - queries := make([]string, 0, len(r.regexp.queries)) - for _, query := range r.regexp.queries { - queries = append(queries, query.regexp.String()) - } - return queries, nil -} - -// GetQueriesTemplates returns the templates used to build the -// query matching. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define queries. -func (r *Route) GetQueriesTemplates() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - queries := make([]string, 0, len(r.regexp.queries)) - for _, query := range r.regexp.queries { - queries = append(queries, query.template) - } - return queries, nil -} - -// GetMethods returns the methods the route matches against -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if route does not have methods. -func (r *Route) GetMethods() ([]string, error) { - if r.err != nil { - return nil, r.err - } - for _, m := range r.matchers { - if methods, ok := m.(methodMatcher); ok { - return []string(methods), nil - } - } - return nil, errors.New("mux: route doesn't have methods") -} - -// GetHostTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a host. 
-func (r *Route) GetHostTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.host == nil { - return "", errors.New("mux: route doesn't have a host") - } - return r.regexp.host.template, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} diff --git a/backend/services/controller/vendor/github.com/gorilla/mux/test_helpers.go b/backend/services/controller/vendor/github.com/gorilla/mux/test_helpers.go deleted file mode 100644 index 5f5c496..0000000 --- a/backend/services/controller/vendor/github.com/gorilla/mux/test_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import "net/http" - -// SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. Arguments are not modified, a shallow -// copy is returned. -// -// This API should only be used for testing purposes; it provides a way to -// inject variables into the request context. Alternatively, URL variables -// can be set by making a route that captures the required variables, -// starting a server and sending the request to that server. 
-func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return requestWithVars(r, val) -} diff --git a/backend/services/controller/vendor/github.com/joho/godotenv/.gitignore b/backend/services/controller/vendor/github.com/joho/godotenv/.gitignore deleted file mode 100644 index e43b0f9..0000000 --- a/backend/services/controller/vendor/github.com/joho/godotenv/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.DS_Store diff --git a/backend/services/controller/vendor/github.com/joho/godotenv/LICENCE b/backend/services/controller/vendor/github.com/joho/godotenv/LICENCE deleted file mode 100644 index e7ddd51..0000000 --- a/backend/services/controller/vendor/github.com/joho/godotenv/LICENCE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2013 John Barton - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/backend/services/controller/vendor/github.com/joho/godotenv/README.md b/backend/services/controller/vendor/github.com/joho/godotenv/README.md deleted file mode 100644 index bfbe66a..0000000 --- a/backend/services/controller/vendor/github.com/joho/godotenv/README.md +++ /dev/null @@ -1,202 +0,0 @@ -# GoDotEnv ![CI](https://github.com/joho/godotenv/workflows/CI/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) - -A Go (golang) port of the Ruby [dotenv](https://github.com/bkeepers/dotenv) project (which loads env vars from a .env file). - -From the original Library: - -> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. -> -> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. - -It can be used as a library (for loading in env for your own daemons etc.) or as a bin command. - -There is test coverage and CI for both linuxish and Windows environments, but I make no guarantees about the bin version working on Windows. 
- -## Installation - -As a library - -```shell -go get github.com/joho/godotenv -``` - -or if you want to use it as a bin command - -go >= 1.17 -```shell -go install github.com/joho/godotenv/cmd/godotenv@latest -``` - -go < 1.17 -```shell -go get github.com/joho/godotenv/cmd/godotenv -``` - -## Usage - -Add your application configuration to your `.env` file in the root of your project: - -```shell -S3_BUCKET=YOURS3BUCKET -SECRET_KEY=YOURSECRETKEYGOESHERE -``` - -Then in your Go app you can do something like - -```go -package main - -import ( - "log" - "os" - - "github.com/joho/godotenv" -) - -func main() { - err := godotenv.Load() - if err != nil { - log.Fatal("Error loading .env file") - } - - s3Bucket := os.Getenv("S3_BUCKET") - secretKey := os.Getenv("SECRET_KEY") - - // now do something with s3 or whatever -} -``` - -If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import - -```go -import _ "github.com/joho/godotenv/autoload" -``` - -While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit - -```go -godotenv.Load("somerandomfile") -godotenv.Load("filenumberone.env", "filenumbertwo.env") -``` - -If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) - -```shell -# I am a comment and that is OK -SOME_VAR=someval -FOO=BAR # comments at line end are OK too -export BAR=BAZ -``` - -Or finally you can do YAML(ish) style - -```yaml -FOO: bar -BAR: baz -``` - -as a final aside, if you don't want godotenv munging your env you can just get a map back instead - -```go -var myEnv map[string]string -myEnv, err := godotenv.Read() - -s3Bucket := myEnv["S3_BUCKET"] -``` - -... or from an `io.Reader` instead of a local file - -```go -reader := getRemoteFile() -myEnv, err := godotenv.Parse(reader) -``` - -... 
or from a `string` if you so desire - -```go -content := getRemoteFileContent() -myEnv, err := godotenv.Unmarshal(content) -``` - -### Precedence & Conventions - -Existing envs take precedence of envs that are loaded later. - -The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use) -for managing multiple environments (i.e. development, test, production) -is to create an env named `{YOURAPP}_ENV` and load envs in this order: - -```go -env := os.Getenv("FOO_ENV") -if "" == env { - env = "development" -} - -godotenv.Load(".env." + env + ".local") -if "test" != env { - godotenv.Load(".env.local") -} -godotenv.Load(".env." + env) -godotenv.Load() // The Original .env -``` - -If you need to, you can also use `godotenv.Overload()` to defy this convention -and overwrite existing envs instead of only supplanting them. Use with caution. - -### Command Mode - -Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` - -``` -godotenv -f /some/path/to/.env some_command with some args -``` - -If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` - -By default, it won't override existing environment variables; you can do that with the `-o` flag. - -### Writing Env Files - -Godotenv can also write a map representing the environment to a correctly-formatted and escaped file - -```go -env, err := godotenv.Unmarshal("KEY=value") -err := godotenv.Write(env, "./.env") -``` - -... or to a string - -```go -env, err := godotenv.Unmarshal("KEY=value") -content, err := godotenv.Marshal(env) -``` - -## Contributing - -Contributions are welcome, but with some caveats. - -This library has been declared feature complete (see [#182](https://github.com/joho/godotenv/issues/182) for background) and will not be accepting issues or pull requests adding new functionality or breaking the library API. 
- -Contributions would be gladly accepted that: - -* bring this library's parsing into closer compatibility with the mainline dotenv implementations, in particular [Ruby's dotenv](https://github.com/bkeepers/dotenv) and [Node.js' dotenv](https://github.com/motdotla/dotenv) -* keep the library up to date with the go ecosystem (ie CI bumps, documentation changes, changes in the core libraries) -* bug fixes for use cases that pertain to the library's purpose of easing development of codebases deployed into twelve factor environments - -*code changes without tests and references to peer dotenv implementations will not be accepted* - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Added some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releases - -Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. - -Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` - -## Who? - -The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. 
diff --git a/backend/services/controller/vendor/github.com/joho/godotenv/godotenv.go b/backend/services/controller/vendor/github.com/joho/godotenv/godotenv.go deleted file mode 100644 index 61b0ebb..0000000 --- a/backend/services/controller/vendor/github.com/joho/godotenv/godotenv.go +++ /dev/null @@ -1,228 +0,0 @@ -// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) -// -// Examples/readme can be found on the GitHub page at https://github.com/joho/godotenv -// -// The TL;DR is that you make a .env file that looks something like -// -// SOME_ENV_VAR=somevalue -// -// and then in your go code you can call -// -// godotenv.Load() -// -// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") -package godotenv - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "sort" - "strconv" - "strings" -) - -const doubleQuoteSpecialChars = "\\\n\r\"!$`" - -// Parse reads an env file from io.Reader, returning a map of keys and values. -func Parse(r io.Reader) (map[string]string, error) { - var buf bytes.Buffer - _, err := io.Copy(&buf, r) - if err != nil { - return nil, err - } - - return UnmarshalBytes(buf.Bytes()) -} - -// Load will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main). -// -// If you call Load without any args it will default to loading .env in the current path. -// -// You can otherwise tell it which files to load (there can be more than one) like: -// -// godotenv.Load("fileone", "filetwo") -// -// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults. 
-func Load(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, false) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Overload will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main). -// -// If you call Overload without any args it will default to loading .env in the current path. -// -// You can otherwise tell it which files to load (there can be more than one) like: -// -// godotenv.Overload("fileone", "filetwo") -// -// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars. -func Overload(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, true) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Read all env (with same file loading semantics as Load) but return values as -// a map rather than automatically writing values into env -func Read(filenames ...string) (envMap map[string]string, err error) { - filenames = filenamesOrDefault(filenames) - envMap = make(map[string]string) - - for _, filename := range filenames { - individualEnvMap, individualErr := readFile(filename) - - if individualErr != nil { - err = individualErr - return // return early on a spazout - } - - for key, value := range individualEnvMap { - envMap[key] = value - } - } - - return -} - -// Unmarshal reads an env file from a string, returning a map of keys and values. -func Unmarshal(str string) (envMap map[string]string, err error) { - return UnmarshalBytes([]byte(str)) -} - -// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values. 
-func UnmarshalBytes(src []byte) (map[string]string, error) { - out := make(map[string]string) - err := parseBytes(src, out) - - return out, err -} - -// Exec loads env vars from the specified filenames (empty map falls back to default) -// then executes the cmd specified. -// -// Simply hooks up os.Stdin/err/out to the command and calls Run(). -// -// If you want more fine grained control over your command it's recommended -// that you use `Load()`, `Overload()` or `Read()` and the `os/exec` package yourself. -func Exec(filenames []string, cmd string, cmdArgs []string, overload bool) error { - op := Load - if overload { - op = Overload - } - if err := op(filenames...); err != nil { - return err - } - - command := exec.Command(cmd, cmdArgs...) - command.Stdin = os.Stdin - command.Stdout = os.Stdout - command.Stderr = os.Stderr - return command.Run() -} - -// Write serializes the given environment and writes it to a file. -func Write(envMap map[string]string, filename string) error { - content, err := Marshal(envMap) - if err != nil { - return err - } - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - _, err = file.WriteString(content + "\n") - if err != nil { - return err - } - return file.Sync() -} - -// Marshal outputs the given environment as a dotenv-formatted environment file. -// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
-func Marshal(envMap map[string]string) (string, error) { - lines := make([]string, 0, len(envMap)) - for k, v := range envMap { - if d, err := strconv.Atoi(v); err == nil { - lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) - } else { - lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) - } - } - sort.Strings(lines) - return strings.Join(lines, "\n"), nil -} - -func filenamesOrDefault(filenames []string) []string { - if len(filenames) == 0 { - return []string{".env"} - } - return filenames -} - -func loadFile(filename string, overload bool) error { - envMap, err := readFile(filename) - if err != nil { - return err - } - - currentEnv := map[string]bool{} - rawEnv := os.Environ() - for _, rawEnvLine := range rawEnv { - key := strings.Split(rawEnvLine, "=")[0] - currentEnv[key] = true - } - - for key, value := range envMap { - if !currentEnv[key] || overload { - _ = os.Setenv(key, value) - } - } - - return nil -} - -func readFile(filename string) (envMap map[string]string, err error) { - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - - return Parse(file) -} - -func doubleQuoteEscape(line string) string { - for _, c := range doubleQuoteSpecialChars { - toReplace := "\\" + string(c) - if c == '\n' { - toReplace = `\n` - } - if c == '\r' { - toReplace = `\r` - } - line = strings.Replace(line, string(c), toReplace, -1) - } - return line -} diff --git a/backend/services/controller/vendor/github.com/joho/godotenv/parser.go b/backend/services/controller/vendor/github.com/joho/godotenv/parser.go deleted file mode 100644 index cc709af..0000000 --- a/backend/services/controller/vendor/github.com/joho/godotenv/parser.go +++ /dev/null @@ -1,271 +0,0 @@ -package godotenv - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strings" - "unicode" -) - -const ( - charComment = '#' - prefixSingleQuote = '\'' - prefixDoubleQuote = '"' - - exportPrefix = "export" -) - -func parseBytes(src []byte, out map[string]string) error { 
- src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - cutset := src - for { - cutset = getStatementStart(cutset) - if cutset == nil { - // reached end of file - break - } - - key, left, err := locateKeyName(cutset) - if err != nil { - return err - } - - value, left, err := extractVarValue(left, out) - if err != nil { - return err - } - - out[key] = value - cutset = left - } - - return nil -} - -// getStatementPosition returns position of statement begin. -// -// It skips any comment line or non-whitespace character. -func getStatementStart(src []byte) []byte { - pos := indexOfNonSpaceChar(src) - if pos == -1 { - return nil - } - - src = src[pos:] - if src[0] != charComment { - return src - } - - // skip comment section - pos = bytes.IndexFunc(src, isCharFunc('\n')) - if pos == -1 { - return nil - } - - return getStatementStart(src[pos:]) -} - -// locateKeyName locates and parses key name and returns rest of slice -func locateKeyName(src []byte) (key string, cutset []byte, err error) { - // trim "export" and space at beginning - src = bytes.TrimLeftFunc(src, isSpace) - if bytes.HasPrefix(src, []byte(exportPrefix)) { - trimmed := bytes.TrimPrefix(src, []byte(exportPrefix)) - if bytes.IndexFunc(trimmed, isSpace) == 0 { - src = bytes.TrimLeftFunc(trimmed, isSpace) - } - } - - // locate key name end and validate it in single loop - offset := 0 -loop: - for i, char := range src { - rchar := rune(char) - if isSpace(rchar) { - continue - } - - switch char { - case '=', ':': - // library also supports yaml-style value declaration - key = string(src[0:i]) - offset = i + 1 - break loop - case '_': - default: - // variable name should match [A-Za-z0-9_.] - if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) || rchar == '.' 
{ - continue - } - - return "", nil, fmt.Errorf( - `unexpected character %q in variable name near %q`, - string(char), string(src)) - } - } - - if len(src) == 0 { - return "", nil, errors.New("zero length string") - } - - // trim whitespace - key = strings.TrimRightFunc(key, unicode.IsSpace) - cutset = bytes.TrimLeftFunc(src[offset:], isSpace) - return key, cutset, nil -} - -// extractVarValue extracts variable value and returns rest of slice -func extractVarValue(src []byte, vars map[string]string) (value string, rest []byte, err error) { - quote, hasPrefix := hasQuotePrefix(src) - if !hasPrefix { - // unquoted value - read until end of line - endOfLine := bytes.IndexFunc(src, isLineEnd) - - // Hit EOF without a trailing newline - if endOfLine == -1 { - endOfLine = len(src) - - if endOfLine == 0 { - return "", nil, nil - } - } - - // Convert line to rune away to do accurate countback of runes - line := []rune(string(src[0:endOfLine])) - - // Assume end of line is end of var - endOfVar := len(line) - if endOfVar == 0 { - return "", src[endOfLine:], nil - } - - // Work backwards to check if the line ends in whitespace then - // a comment (ie asdasd # some comment) - for i := endOfVar - 1; i >= 0; i-- { - if line[i] == charComment && i > 0 { - if isSpace(line[i-1]) { - endOfVar = i - break - } - } - } - - trimmed := strings.TrimFunc(string(line[0:endOfVar]), isSpace) - - return expandVariables(trimmed, vars), src[endOfLine:], nil - } - - // lookup quoted string terminator - for i := 1; i < len(src); i++ { - if char := src[i]; char != quote { - continue - } - - // skip escaped quote symbol (\" or \', depends on quote) - if prevChar := src[i-1]; prevChar == '\\' { - continue - } - - // trim quotes - trimFunc := isCharFunc(rune(quote)) - value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc)) - if quote == prefixDoubleQuote { - // unescape newlines for double quote (this is compat feature) - // and expand environment variables - value = 
expandVariables(expandEscapes(value), vars) - } - - return value, src[i+1:], nil - } - - // return formatted error if quoted string is not terminated - valEndIndex := bytes.IndexFunc(src, isCharFunc('\n')) - if valEndIndex == -1 { - valEndIndex = len(src) - } - - return "", nil, fmt.Errorf("unterminated quoted value %s", src[:valEndIndex]) -} - -func expandEscapes(str string) string { - out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string { - c := strings.TrimPrefix(match, `\`) - switch c { - case "n": - return "\n" - case "r": - return "\r" - default: - return match - } - }) - return unescapeCharsRegex.ReplaceAllString(out, "$1") -} - -func indexOfNonSpaceChar(src []byte) int { - return bytes.IndexFunc(src, func(r rune) bool { - return !unicode.IsSpace(r) - }) -} - -// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character -func hasQuotePrefix(src []byte) (prefix byte, isQuored bool) { - if len(src) == 0 { - return 0, false - } - - switch prefix := src[0]; prefix { - case prefixDoubleQuote, prefixSingleQuote: - return prefix, true - default: - return 0, false - } -} - -func isCharFunc(char rune) func(rune) bool { - return func(v rune) bool { - return v == char - } -} - -// isSpace reports whether the rune is a space character but not line break character -// -// this differs from unicode.IsSpace, which also applies line break as space -func isSpace(r rune) bool { - switch r { - case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0: - return true - } - return false -} - -func isLineEnd(r rune) bool { - if r == '\n' || r == '\r' { - return true - } - return false -} - -var ( - escapeRegex = regexp.MustCompile(`\\.`) - expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) - unescapeCharsRegex = regexp.MustCompile(`\\([^$])`) -) - -func expandVariables(v string, m map[string]string) string { - return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string { - submatch := 
expandVarRegex.FindStringSubmatch(s) - - if submatch == nil { - return s - } - if submatch[1] == "\\" || submatch[2] == "(" { - return submatch[0][1:] - } else if submatch[4] != "" { - return m[submatch[4]] - } - return s - }) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/.gitattributes b/backend/services/controller/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 4024335..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/.gitignore b/backend/services/controller/vendor/github.com/klauspost/compress/.gitignore deleted file mode 100644 index d31b378..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/s2/cmd/_s2sx/sfx-exe - -# Linux perf files -perf.data -perf.data.old - -# gdb history -.gdb_history diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/.goreleaser.yml b/backend/services/controller/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index 4c28dff..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,127 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com -before: - hooks: - - ./gen.sh - - go install mvdan.cc/garble@v0.10.1 - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/LICENSE b/backend/services/controller/vendor/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 
87d5574..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/README.md b/backend/services/controller/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index 43de486..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,661 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. -* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
- -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) - * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 - * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 - * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 - * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 - * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 - * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull 
by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - -* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) - * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 - * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 - * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 - * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 - * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 - * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 - -* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) - * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 - * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). 
https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 - * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 - * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 - * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 - -* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) - * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 - * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 - * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 - * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 - * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 - * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 - -
- See changes to v1.15.x - -* Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 - * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 - * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 - * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 - -* Jan 3rd, 2023 (v1.15.14) - - * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 - * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 - * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 - * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 - -* Dec 11, 2022 (v1.15.13) - * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 - * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 
https://github.com/klauspost/compress/pull/708 - -* Oct 26, 2022 (v1.15.12) - - * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 - * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 - -* Sept 26, 2022 (v1.15.11) - - * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 - * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 - * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 - * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add 
Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. 
- -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. - -Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, it is recommended to testing when upgrading. - -
- -
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. 
[#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
- -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. 
[#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. 
[#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. 
[#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. 
[#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features -* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. 
-* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. -* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Less allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) - -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. 
[#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: fix 32 bit builds. -* June 17, 2019: Easier use in modules (less dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). -* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). 
-* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. 
-* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. -* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
- -# deflate usage - -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer. stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers consider using -the stateless compress described below. 
- -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It will do compression but without maintaining any state between Write calls. - -This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -``` - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle. - -Compression is almost always worse than the fastest compression level -and each write will allocate (a little) memory. - -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. 
Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. 
The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. - -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. - -The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. 
- -So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. 
- -This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. -* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. -* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. -* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. -* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. 
-* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/SECURITY.md b/backend/services/controller/vendor/github.com/klauspost/compress/SECURITY.md deleted file mode 100644 index ca6685e..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/SECURITY.md +++ /dev/null @@ -1,25 +0,0 @@ -# Security Policy - -## Supported Versions - -Security updates are applied only to the latest release. - -## Vulnerability Definition - -A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. - -Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. - -Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. - -It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. - -Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. - -## Reporting a Vulnerability - -If you have discovered a security vulnerability in this project, please report it privately. 
**Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. - -Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. - -This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/compressible.go b/backend/services/controller/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there was two correct predictions in a row. 
- if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the number of bits minimum required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/deflate.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/deflate.go deleted file mode 100644 index de912e1..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/deflate.go +++ /dev/null @@ -1,1017 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Copyright (c) 2015 Klaus Post -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package flate - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -const ( - NoCompression = 0 - BestSpeed = 1 - BestCompression = 9 - DefaultCompression = -1 - - // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman - // entropy encoding. This mode is useful in compressing data that has - // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) - // that lacks an entropy encoder. Compression gains are achieved when - // certain bytes in the input stream occur more frequently than others. - // - // Note that HuffmanOnly produces a compressed output that is - // RFC 1951 compliant. That is, any valid DEFLATE decompressor will - // continue to be able to decompress this output. - HuffmanOnly = -2 - ConstantCompression = HuffmanOnly // compatibility alias. - - logWindowSize = 15 - windowSize = 1 << logWindowSize - windowMask = windowSize - 1 - logMaxOffsetSize = 15 // Standard DEFLATE - minMatchLength = 4 // The smallest match that the compressor looks for - maxMatchLength = 258 // The longest match for the compressor - minOffsetSize = 1 // The shortest offset that makes any sense - - // The maximum number of tokens we will encode at the time. - // Smaller sizes usually creates less optimal blocks. - // Bigger can make context switching slow. - // We use this for levels 7-9, so we make it big. - maxFlateBlockTokens = 1 << 15 - maxStoreBlockSize = 65535 - hashBits = 17 // After 17 performance degrades - hashSize = 1 << hashBits - hashMask = (1 << hashBits) - 1 - hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 28 - - skipNever = math.MaxInt32 - - debugDeflate = false -) - -type compressionLevel struct { - good, lazy, nice, chain, fastSkipHashing, level int -} - -// Compression levels have been rebalanced from zlib deflate defaults -// to give a bigger spread in speed and compression. 
-// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ -var levels = []compressionLevel{ - {}, // 0 - // Level 1-6 uses specialized algorithm - values not used - {0, 0, 0, 0, 0, 1}, - {0, 0, 0, 0, 0, 2}, - {0, 0, 0, 0, 0, 3}, - {0, 0, 0, 0, 0, 4}, - {0, 0, 0, 0, 0, 5}, - {0, 0, 0, 0, 0, 6}, - // Levels 7-9 use increasingly more lazy matching - // and increasingly stringent conditions for "good enough". - {8, 12, 16, 24, skipNever, 7}, - {16, 30, 40, 64, skipNever, 8}, - {32, 258, 258, 1024, skipNever, 9}, -} - -// advancedState contains state for the advanced levels, with bigger hash tables, etc. -type advancedState struct { - // deflate state - length int - offset int - maxInsertIndex int - chainHead int - hashOffset int - - ii uint16 // position of last match, intended to overflow to reset. - - // input window: unprocessed data is window[index:windowEnd] - index int - hashMatch [maxMatchLength + minMatchLength]uint32 - - // Input hash chains - // hashHead[hashValue] contains the largest inputIndex with the specified hash value - // If hashHead[hashValue] is within the current window, then - // hashPrev[hashHead[hashValue] & windowMask] contains the previous index - // with the same hash value. - hashHead [hashSize]uint32 - hashPrev [windowSize]uint32 -} - -type compressor struct { - compressionLevel - - h *huffmanEncoder - w *huffmanBitWriter - - // compression algorithm - fill func(*compressor, []byte) int // copy data to window - step func(*compressor) // process window - - window []byte - windowEnd int - blockStart int // window index where current tokens start - err error - - // queued output tokens - tokens tokens - fast fastEnc - state *advancedState - - sync bool // requesting flush - byteAvailable bool // if true, still need to process window[index-1]. 
-} - -func (d *compressor) fillDeflate(b []byte) int { - s := d.state - if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { - // shift the window by windowSize - //copy(d.window[:], d.window[windowSize:2*windowSize]) - *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) - s.index -= windowSize - d.windowEnd -= windowSize - if d.blockStart >= windowSize { - d.blockStart -= windowSize - } else { - d.blockStart = math.MaxInt32 - } - s.hashOffset += windowSize - if s.hashOffset > maxHashOffset { - delta := s.hashOffset - 1 - s.hashOffset -= delta - s.chainHead -= delta - // Iterate over slices instead of arrays to avoid copying - // the entire table onto the stack (Issue #18625). - for i, v := range s.hashPrev[:] { - if int(v) > delta { - s.hashPrev[i] = uint32(int(v) - delta) - } else { - s.hashPrev[i] = 0 - } - } - for i, v := range s.hashHead[:] { - if int(v) > delta { - s.hashHead[i] = uint32(int(v) - delta) - } else { - s.hashHead[i] = 0 - } - } - } - } - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - var window []byte - if d.blockStart <= index { - window = d.window[d.blockStart:index] - } - d.blockStart = index - //d.w.writeBlock(tok, eof, window) - d.w.writeBlockDynamic(tok, eof, window, d.sync) - return d.w.err - } - return nil -} - -// writeBlockSkip writes the current block and uses the number of tokens -// to determine if the block should be stored on no matches, or -// only huffman encoded. -func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - if d.blockStart <= index { - window := d.window[d.blockStart:index] - // If we removed less than a 64th of all literals - // we huffman compress the block. - if int(tok.n) > len(window)-int(tok.n>>6) { - d.w.writeBlockHuff(eof, window, d.sync) - } else { - // Write a dynamic huffman block. 
- d.w.writeBlockDynamic(tok, eof, window, d.sync) - } - } else { - d.w.writeBlock(tok, eof, nil) - } - d.blockStart = index - return d.w.err - } - return nil -} - -// fillWindow will fill the current window with the supplied -// dictionary and calculate all hashes. -// This is much faster than doing a full encode. -// Should only be used after a start/reset. -func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only or huffman mode. - if d.level <= 0 { - return - } - if d.fast != nil { - // encode the last data, but discard the result - if len(b) > maxMatchOffset { - b = b[len(b)-maxMatchOffset:] - } - d.fast.Encode(&d.tokens, b) - d.tokens.Reset() - return - } - s := d.state - // If we are given too much, cut it. - if len(b) > windowSize { - b = b[len(b)-windowSize:] - } - // Add all to window. - n := copy(d.window[d.windowEnd:], b) - - // Calculate 256 hashes at the time (more L1 cache hits) - loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { - startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - - if dstSize <= 0 { - continue - } - - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - // Update window information. - d.windowEnd += n - s.index = n -} - -// Try to find a match starting at index whose length is greater than prevSize. -// We only look at chainCount possibilities before giving up. 
-// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } - - win := d.window[0 : pos+minMatchLook] - - // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } - - // If we've got a match that's good enough, only look in 1/4 the chain. - tries := d.chain - length = minMatchLength - 1 - - wEnd := win[pos+length] - wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } - offset = 0 - - if d.chain < 100 { - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return - } - - // Minimum gain to accept a match. - cGain := 4 - - // Some like it higher (CSV), some like it lower (JSON) - const baseCost = 3 - // Base is 4 bytes at with an additional cost. - // Matches must be better than this. - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - // Calculate gain. 
Estimate - newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) - - //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) - if newGain > cGain { - length = n - offset = pos - i - cGain = newGain - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return -} - -func (d *compressor) writeStoredBlock(buf []byte) error { - if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { - return d.w.err - } - d.w.writeBytes(buf) - return d.w.err -} - -// hash4 returns a hash representation of the first 4 bytes -// of the supplied slice. -// The caller must ensure that len(b) >= 4. -func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. 
-func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> (32 - h) -} - -// bulkHash4 will compute hashes using the same -// algorithm as hash4 -func bulkHash4(b []byte, dst []uint32) { - if len(b) < 4 { - return - } - hb := binary.LittleEndian.Uint32(b) - - dst[0] = hash4u(hb, hashBits) - end := len(b) - 4 + 1 - for i := 1; i < end; i++ { - hb = (hb >> 8) | uint32(b[i+3])<<24 - dst[i] = hash4u(hb, hashBits) - } -} - -func (d *compressor) initDeflate() { - d.window = make([]byte, 2*windowSize) - d.byteAvailable = false - d.err = nil - if d.state == nil { - return - } - s := d.state - s.index = 0 - s.hashOffset = 1 - s.length = minMatchLength - 1 - s.offset = 0 - s.chainHead = -1 -} - -// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -// meaning it always has lazy matching on. -func (d *compressor) deflateLazy() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = debugDeflate - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - if d.windowEnd != s.index && d.chain > 100 { - // Get literal huffman coder. - if d.h == nil { - d.h = newHuffmanEncoder(maxFlateBlockTokens) - } - var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { - tmp[v]++ - } - d.h.generate(tmp[:], 15) - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - // Flush current output block if any. 
- if d.byteAvailable { - // There is still one pending token that needs to be flushed - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - } - if d.tokens.n > 0 { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - hash := hash4(d.window[s.index:]) - ch := s.hashHead[hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[hash] = uint32(s.index + s.hashOffset) - } - prevLength := s.length - prevOffset := s.offset - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - - if prevLength >= minMatchLength && s.length <= prevLength { - // No better match, but check for better match at end... - // - // Skip forward a number of bytes. - // Offset of 2 seems to yield best results. 3 is sometimes better. - const checkOff = 2 - - // Check all, except full length - if prevLength < maxMatchLength-checkOff { - prevIndex := s.index - 1 - if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } - end += prevIndex - - // Hash at match end. - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength { - prevLength = length - prevOffset = prevIndex - ch2 - - // Extend back... 
- for i := checkOff - 1; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } else if false { - // Check one further ahead. - // Only rarely better, disabled for now. - prevIndex++ - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength+checkOff { - prevLength = length - prevOffset = prevIndex - ch2 - prevIndex-- - - // Extend back... - for i := checkOff; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } - } - } - } - } - } - // There was a match at the previous step, and the current match is - // not better. 
Output the previous match. - d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - newIndex := s.index + prevLength - 1 - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - - s.index = newIndex - d.byteAvailable = false - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.ii = 0 - } else { - // Reset, if we got a match this run. - if s.length >= minMatchLength { - s.ii = 0 - } - // We have a byte waiting. Emit it. - if d.byteAvailable { - s.ii++ - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - - // If we have a long run of no matches, skip additional bytes - // Resets when s.ii overflows after 64KB. 
- if n := int(s.ii) - d.chain; n > 0 { - n = 1 + int(n>>6) - for j := 0; j < n; j++ { - if s.index >= d.windowEnd-1 { - break - } - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - // Index... - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - s.index++ - } - // Flush last byte - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - } - } else { - s.index++ - d.byteAvailable = true - } - } - } -} - -func (d *compressor) store() { - if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - d.windowEnd = 0 - } -} - -// fillWindow will fill the buffer with data for huffman-only compression. -// The number of bytes copied is returned. -func (d *compressor) fillBlock(b []byte) int { - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -// storeHuff will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. -// Any error that occurred will be in d.err -func (d *compressor) storeHuff() { - if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { - return - } - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - d.windowEnd = 0 -} - -// storeFast will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. 
-// Any error that occurred will be in d.err -func (d *compressor) storeFast() { - // We only compress if we have maxStoreBlockSize. - if d.windowEnd < len(d.window) { - if !d.sync { - return - } - // Handle extremely small sizes. - if d.windowEnd < 128 { - if d.windowEnd == 0 { - return - } - if d.windowEnd <= 32 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - } else { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 - d.fast.Reset() - return - } - } - - d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) - // If we made zero matches, store the block as is. - if d.tokens.n == 0 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - // If we removed less than 1/16th, huffman compress the block. - } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } else { - d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 -} - -// write will add input byte to the stream. -// Unless an error occurs all bytes will be consumed. 
-func (d *compressor) write(b []byte) (n int, err error) { - if d.err != nil { - return 0, d.err - } - n = len(b) - for len(b) > 0 { - if d.windowEnd == len(d.window) || d.sync { - d.step(d) - } - b = b[d.fill(d, b):] - if d.err != nil { - return 0, d.err - } - } - return n, d.err -} - -func (d *compressor) syncFlush() error { - d.sync = true - if d.err != nil { - return d.err - } - d.step(d) - if d.err == nil { - d.w.writeStoredHeader(0, false) - d.w.flush() - d.err = d.w.err - } - d.sync = false - return d.err -} - -func (d *compressor) init(w io.Writer, level int) (err error) { - d.w = newHuffmanBitWriter(w) - - switch { - case level == NoCompression: - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).store - case level == ConstantCompression: - d.w.logNewTablePenalty = 10 - d.window = make([]byte, 32<<10) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeHuff - case level == DefaultCompression: - level = 5 - fallthrough - case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 7 - d.fast = newFastEnc(level) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 8 - d.state = &advancedState{} - d.compressionLevel = levels[level] - d.initDeflate() - d.fill = (*compressor).fillDeflate - d.step = (*compressor).deflateLazy - case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: - d.w.logNewTablePenalty = 7 - d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - default: - return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) - } - d.level = level - return nil -} - -// reset the state of the compressor. 
-func (d *compressor) reset(w io.Writer) { - d.w.reset(w) - d.sync = false - d.err = nil - // We only need to reset a few things for Snappy. - if d.fast != nil { - d.fast.Reset() - d.windowEnd = 0 - d.tokens.Reset() - return - } - switch d.compressionLevel.chain { - case 0: - // level was NoCompression or ConstantCompresssion. - d.windowEnd = 0 - default: - s := d.state - s.chainHead = -1 - for i := range s.hashHead { - s.hashHead[i] = 0 - } - for i := range s.hashPrev { - s.hashPrev[i] = 0 - } - s.hashOffset = 1 - s.index, d.windowEnd = 0, 0 - d.blockStart, d.byteAvailable = 0, false - d.tokens.Reset() - s.length = minMatchLength - 1 - s.offset = 0 - s.ii = 0 - s.maxInsertIndex = 0 - } -} - -func (d *compressor) close() error { - if d.err != nil { - return d.err - } - d.sync = true - d.step(d) - if d.err != nil { - return d.err - } - if d.w.writeStoredHeader(0, true); d.w.err != nil { - return d.w.err - } - d.w.flush() - d.w.reset(nil) - return d.w.err -} - -// NewWriter returns a new Writer compressing data at the given level. -// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); -// higher levels typically run slower but compress more. -// Level 0 (NoCompression) does not attempt any compression; it only adds the -// necessary DEFLATE framing. -// Level -1 (DefaultCompression) uses the default compression level. -// Level -2 (ConstantCompression) will use Huffman compression only, giving -// a very fast compression for all types of input, but sacrificing considerable -// compression efficiency. -// -// If level is in the range [-2, 9] then the error returned will be nil. -// Otherwise the error returned will be non-nil. -func NewWriter(w io.Writer, level int) (*Writer, error) { - var dw Writer - if err := dw.d.init(w, level); err != nil { - return nil, err - } - return &dw, nil -} - -// NewWriterDict is like NewWriter but initializes the new -// Writer with a preset dictionary. 
The returned Writer behaves -// as if the dictionary had been written to it without producing -// any compressed output. The compressed data written to w -// can only be decompressed by a Reader initialized with the -// same dictionary. -func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { - zw, err := NewWriter(w, level) - if err != nil { - return nil, err - } - zw.d.fillWindow(dict) - zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. - return zw, err -} - -// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. -const MinCustomWindowSize = 32 - -// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. -const MaxCustomWindowSize = windowSize - -// NewWriterWindow returns a new Writer compressing data with a custom window size. -// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. -func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { - if windowSize < MinCustomWindowSize { - return nil, errors.New("flate: requested window size less than MinWindowSize") - } - if windowSize > MaxCustomWindowSize { - return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") - } - var dw Writer - if err := dw.d.init(w, -windowSize); err != nil { - return nil, err - } - return &dw, nil -} - -// A Writer takes data written to it and writes the compressed -// form of that data to an underlying writer (see NewWriter). -type Writer struct { - d compressor - dict []byte -} - -// Write writes data to w, which will eventually write the -// compressed form of data to its underlying writer. -func (w *Writer) Write(data []byte) (n int, err error) { - return w.d.write(data) -} - -// Flush flushes any pending data to the underlying writer. -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. 
-// Flush does not return until the data has been written. -// Calling Flush when there is no pending data still causes the Writer -// to emit a sync marker of at least 4 bytes. -// If the underlying writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (w *Writer) Flush() error { - // For more about flushing: - // http://www.bolet.org/~pornin/deflate-flush.html - return w.d.syncFlush() -} - -// Close flushes and closes the writer. -func (w *Writer) Close() error { - return w.d.close() -} - -// Reset discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level and dictionary. -func (w *Writer) Reset(dst io.Writer) { - if len(w.dict) > 0 { - // w was created with NewWriterDict - w.d.reset(dst) - if dst != nil { - w.d.fillWindow(w.dict) - } - } else { - // w was created with NewWriter - w.d.reset(dst) - } -} - -// ResetDict discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level, but sets a specific dictionary. -func (w *Writer) ResetDict(dst io.Writer, dict []byte) { - w.dict = dict - w.d.reset(dst) - w.d.fillWindow(w.dict) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/dict_decoder.go deleted file mode 100644 index bb36351..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// dictDecoder implements the LZ77 sliding dictionary as used in decompression. 
-// LZ77 decompresses data through sequences of two forms of commands: -// -// - Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. -// -// - Backward copies: Runs of one or more symbols are copied from previously -// emitted data. Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. -// The writeCopy and tryWriteCopy are used to implement this command. -// -// For performance reasons, this implementation performs little to no sanity -// checks about the arguments. As such, the invariants documented for each -// method call must be respected. -type dictDecoder struct { - hist []byte // Sliding window history - - // Invariant: 0 <= rdPos <= wrPos <= len(hist) - wrPos int // Current output position in buffer - rdPos int // Have emitted hist[:rdPos] already - full bool // Has a full window length been written yet? -} - -// init initializes dictDecoder to have a sliding window dictionary of the given -// size. If a preset dict is provided, it will initialize the dictionary with -// the contents of dict. 
-func (dd *dictDecoder) init(size int, dict []byte) { - *dd = dictDecoder{hist: dd.hist} - - if cap(dd.hist) < size { - dd.hist = make([]byte, size) - } - dd.hist = dd.hist[:size] - - if len(dict) > len(dd.hist) { - dict = dict[len(dict)-len(dd.hist):] - } - dd.wrPos = copy(dd.hist, dict) - if dd.wrPos == len(dd.hist) { - dd.wrPos = 0 - dd.full = true - } - dd.rdPos = dd.wrPos -} - -// histSize reports the total amount of historical data in the dictionary. -func (dd *dictDecoder) histSize() int { - if dd.full { - return len(dd.hist) - } - return dd.wrPos -} - -// availRead reports the number of bytes that can be flushed by readFlush. -func (dd *dictDecoder) availRead() int { - return dd.wrPos - dd.rdPos -} - -// availWrite reports the available amount of output buffer space. -func (dd *dictDecoder) availWrite() int { - return len(dd.hist) - dd.wrPos -} - -// writeSlice returns a slice of the available buffer to write data to. -// -// This invariant will be kept: len(s) <= availWrite() -func (dd *dictDecoder) writeSlice() []byte { - return dd.hist[dd.wrPos:] -} - -// writeMark advances the writer pointer by cnt. -// -// This invariant must be kept: 0 <= cnt <= availWrite() -func (dd *dictDecoder) writeMark(cnt int) { - dd.wrPos += cnt -} - -// writeByte writes a single byte to the dictionary. -// -// This invariant must be kept: 0 < availWrite() -func (dd *dictDecoder) writeByte(c byte) { - dd.hist[dd.wrPos] = c - dd.wrPos++ -} - -// writeCopy copies a string at a given (dist, length) to the output. -// This returns the number of bytes copied and may be less than the requested -// length if the available space in the output buffer is too small. 
-// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) writeCopy(dist, length int) int { - dstBase := dd.wrPos - dstPos := dstBase - srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } - - // Copy non-overlapping section after destination position. - // - // This section is non-overlapping in that the copy length for this section - // is always less than or equal to the backwards distance. This can occur - // if a distance refers to data that wraps-around in the buffer. - // Thus, a backwards copy is performed here; that is, the exact bytes in - // the source prior to the copy is placed in the destination. - if srcPos < 0 { - srcPos += len(dd.hist) - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) - srcPos = 0 - } - - // Copy possibly overlapping section before destination position. - // - // This section can overlap if the copy length for this section is larger - // than the backwards distance. This is allowed by LZ77 so that repeated - // strings can be succinctly represented using (dist, length) pairs. - // Thus, a forwards copy is performed here; that is, the bytes copied is - // possibly dependent on the resulting bytes in the destination as the copy - // progresses along. This is functionally equivalent to the following: - // - // for i := 0; i < endPos-dstPos; i++ { - // dd.hist[dstPos+i] = dd.hist[srcPos+i] - // } - // dstPos = endPos - // - for dstPos < endPos { - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// tryWriteCopy tries to copy a string at a given (distance, length) to the -// output. This specialized version is optimized for short distances. -// -// This method is designed to be inlined for performance reasons. 
-// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) tryWriteCopy(dist, length int) int { - dstPos := dd.wrPos - endPos := dstPos + length - if dstPos < dist || endPos > len(dd.hist) { - return 0 - } - dstBase := dstPos - srcPos := dstPos - dist - - // Copy possibly overlapping section before destination position. -loop: - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - if dstPos < endPos { - goto loop // Avoid for-loop so that this function can be inlined - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// readFlush returns a slice of the historical buffer that is ready to be -// emitted to the user. The data returned by readFlush must be fully consumed -// before calling any other dictDecoder methods. -func (dd *dictDecoder) readFlush() []byte { - toRead := dd.hist[dd.rdPos:dd.wrPos] - dd.rdPos = dd.wrPos - if dd.wrPos == len(dd.hist) { - dd.wrPos, dd.rdPos = 0, 0 - dd.full = true - } - return toRead -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/fast_encoder.go deleted file mode 100644 index c8124b5..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Modified for deflate by Klaus Post (c) 2015. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package flate - -import ( - "encoding/binary" - "fmt" -) - -type fastEnc interface { - Encode(dst *tokens, src []byte) - Reset() -} - -func newFastEnc(level int) fastEnc { - switch level { - case 1: - return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} - case 2: - return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} - case 3: - return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} - case 4: - return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} - case 5: - return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} - case 6: - return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} - default: - panic("invalid level specified") - } -} - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. - baseMatchOffset = 1 // The smallest match offset - baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 - maxMatchOffset = 1 << 15 // The largest match offset - - bTableBits = 17 // Bits used in the big tables - bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. - bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. -) - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type tableEntry struct { - offset int32 -} - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. 
-type fastGen struct { - hist []byte - cur int32 -} - -func (e *fastGen) addBlock(src []byte) int32 { - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < maxMatchOffset*2 { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -type tableEntryPrev struct { - Cur tableEntry - Prev tableEntry -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) -} - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). 
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastGen) Reset() { - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. - if e.cur <= bufferReset { - e.cur += maxMatchOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go deleted file mode 100644 index f70594c..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright 2009 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // The largest offset code. - offsetCodeCount = 30 - - // The special code used to mark the end of a block. - endBlockMarker = 256 - - // The first length code. - lengthCodesStart = 257 - - // The number of codegen codes. - codegenCodeCount = 19 - badCode = 255 - - // maxPredefinedTokens is the maximum number of tokens - // where we check if fixed size is smaller. - maxPredefinedTokens = 250 - - // bufferFlushSize indicates the buffer size - // after which bytes are flushed to the writer. - // Should preferably be a multiple of 6, since - // we accumulate 6 bytes between writes to the buffer. - bufferFlushSize = 246 -) - -// Minimum length code that emits bits. -const lengthExtraBitsMinCode = 8 - -// The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]uint8{ - /* 257 */ 0, 0, 0, - /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, - /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, - /* 280 */ 4, 5, 5, 5, 5, 0, -} - -// The length indicated by length code X - LENGTH_CODES_START. -var lengthBase = [32]uint8{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, - 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, - 64, 80, 96, 112, 128, 160, 192, 224, 255, -} - -// Minimum offset code that emits bits. -const offsetExtraBitsMinCode = 4 - -// offset code word extra bits. 
-var offsetExtraBits = [32]int8{ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, - 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, - /* extended window */ - 14, 14, -} - -var offsetCombined = [32]uint32{} - -func init() { - var offsetBase = [32]uint32{ - /* normal deflate */ - 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, - 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, - 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, - 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, - 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, - 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, - - /* extended window */ - 0x008000, 0x00c000, - } - - for i := range offsetCombined[:] { - // Don't use extended window values... - if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { - continue - } - offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) - } -} - -// The odd order in which the codegen code sizes are written. -var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -type huffmanBitWriter struct { - // writer is the underlying writer. - // Do not use it directly; use the write method, which ensures - // that Write errors are sticky. - writer io.Writer - - // Data waiting to be written is bytes[0:nbytes] - // and then the low nbits of bits. - bits uint64 - nbits uint8 - nbytes uint8 - lastHuffMan bool - literalEncoding *huffmanEncoder - tmpLitEncoding *huffmanEncoder - offsetEncoding *huffmanEncoder - codegenEncoding *huffmanEncoder - err error - lastHeader int - // Set between 0 (reused block can be up to 2x the size) - logNewTablePenalty uint - bytes [256 + 8]byte - literalFreq [lengthCodesStart + 32]uint16 - offsetFreq [32]uint16 - codegenFreq [codegenCodeCount]uint16 - - // codegen must have an extra space for the final symbol. - codegen [literalCount + offsetCodeCount + 1]uint8 -} - -// Huffman reuse. -// -// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. 
-// -// This is controlled by several variables: -// -// If lastHeader is non-zero the Huffman table can be reused. -// This also indicates that a Huffman table has been generated that can output all -// possible symbols. -// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated -// an EOB with the previous table must be written. -// -// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. -// -// An incoming block estimates the output size of a new table using a 'fresh' by calculating the -// optimal size and adding a penalty in 'logNewTablePenalty'. -// A Huffman table is not optimal, which is why we add a penalty, and generating a new table -// is slower both for compression and decompression. - -func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { - return &huffmanBitWriter{ - writer: w, - literalEncoding: newHuffmanEncoder(literalCount), - tmpLitEncoding: newHuffmanEncoder(literalCount), - codegenEncoding: newHuffmanEncoder(codegenCodeCount), - offsetEncoding: newHuffmanEncoder(offsetCodeCount), - } -} - -func (w *huffmanBitWriter) reset(writer io.Writer) { - w.writer = writer - w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.lastHeader = 0 - w.lastHuffMan = false -} - -func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { - a := t.offHist[:offsetCodeCount] - b := w.offsetEncoding.codes - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.extraHist[:literalCount-256] - b = w.literalEncoding.codes[256:literalCount] - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.litHist[:256] - b = w.literalEncoding.codes[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - return true -} - -func (w *huffmanBitWriter) flush() { - if w.err != nil { - w.nbits = 0 - return - } - if w.lastHeader > 0 { - // We owe an EOB - 
w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - n := w.nbytes - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - if w.nbits > 8 { // Avoid underflow - w.nbits -= 8 - } else { - w.nbits = 0 - } - n++ - } - w.bits = 0 - w.write(w.bytes[:n]) - w.nbytes = 0 -} - -func (w *huffmanBitWriter) write(b []byte) { - if w.err != nil { - return - } - _, w.err = w.writer.Write(b) -} - -func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { - w.bits |= uint64(b) << (w.nbits & 63) - w.nbits += nb - if w.nbits >= 48 { - w.writeOutBits() - } -} - -func (w *huffmanBitWriter) writeBytes(bytes []byte) { - if w.err != nil { - return - } - n := w.nbytes - if w.nbits&7 != 0 { - w.err = InternalError("writeBytes with unfinished bits") - return - } - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - w.nbits -= 8 - n++ - } - if n != 0 { - w.write(w.bytes[:n]) - } - w.nbytes = 0 - w.write(bytes) -} - -// RFC 1951 3.2.7 specifies a special run-length encoding for specifying -// the literal and offset lengths arrays (which are concatenated into a single -// array). This method generates that run-length encoding. -// -// The result is written into the codegen array, and the frequencies -// of each code is written into the codegenFreq array. -// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional -// information. Code badCode is an end marker -// -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use -func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { - for i := range w.codegenFreq { - w.codegenFreq[i] = 0 - } - // Note that we are using codegen both as a temporary variable for holding - // a copy of the frequencies, and as the place where we put the result. - // This is fine because the output is always shorter than the input used - // so far. 
- codegen := w.codegen[:] // cache - // Copy the concatenated code sizes to codegen. Put a marker at the end. - cgnl := codegen[:numLiterals] - for i := range cgnl { - cgnl[i] = litEnc.codes[i].len() - } - - cgnl = codegen[numLiterals : numLiterals+numOffsets] - for i := range cgnl { - cgnl[i] = offEnc.codes[i].len() - } - codegen[numLiterals+numOffsets] = badCode - - size := codegen[0] - count := 1 - outIndex := 0 - for inIndex := 1; size != badCode; inIndex++ { - // INVARIANT: We have seen "count" copies of size that have not yet - // had output generated for them. - nextSize := codegen[inIndex] - if nextSize == size { - count++ - continue - } - // We need to generate codegen indicating "count" of size. - if size != 0 { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - count-- - for count >= 3 { - n := 6 - if n > count { - n = count - } - codegen[outIndex] = 16 - outIndex++ - codegen[outIndex] = uint8(n - 3) - outIndex++ - w.codegenFreq[16]++ - count -= n - } - } else { - for count >= 11 { - n := 138 - if n > count { - n = count - } - codegen[outIndex] = 18 - outIndex++ - codegen[outIndex] = uint8(n - 11) - outIndex++ - w.codegenFreq[18]++ - count -= n - } - if count >= 3 { - // count >= 3 && count <= 10 - codegen[outIndex] = 17 - outIndex++ - codegen[outIndex] = uint8(count - 3) - outIndex++ - w.codegenFreq[17]++ - count = 0 - } - } - count-- - for ; count >= 0; count-- { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - } - // Set up invariant for next time through the loop. - size = nextSize - count = 1 - } - // Marker indicating the end of the codegen. 
- codegen[outIndex] = badCode -} - -func (w *huffmanBitWriter) codegens() int { - numCodegens := len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return numCodegens -} - -func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { - numCodegens = len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return 3 + 5 + 5 + 4 + (3 * numCodegens) + - w.codegenEncoding.bitLength(w.codegenFreq[:]) + - int(w.codegenFreq[16])*2 + - int(w.codegenFreq[17])*3 + - int(w.codegenFreq[18])*7, numCodegens -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { - size = litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) - return size -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { - header, numCodegens := w.headerSize() - size = header + - litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) + - extraBits - return size, numCodegens -} - -// extraBitSize will return the number of bits that will be written -// as "extra" bits on matches. -func (w *huffmanBitWriter) extraBitSize() int { - total := 0 - for i, n := range w.literalFreq[257:literalCount] { - total += int(n) * int(lengthExtraBits[i&31]) - } - for i, n := range w.offsetFreq[:offsetCodeCount] { - total += int(n) * int(offsetExtraBits[i&31]) - } - return total -} - -// fixedSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) fixedSize(extraBits int) int { - return 3 + - fixedLiteralEncoding.bitLength(w.literalFreq[:]) + - fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + - extraBits -} - -// storedSize calculates the stored size, including header. 
-// The function returns the size in bits and whether the block -// fits inside a single block. -func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { - if in == nil { - return 0, false - } - if len(in) <= maxStoreBlockSize { - return (len(in) + 5) * 8, true - } - return 0, false -} - -func (w *huffmanBitWriter) writeCode(c hcode) { - // The function does not get inlined if we "& 63" the shift. - w.bits |= c.code64() << (w.nbits & 63) - w.nbits += c.len() - if w.nbits >= 48 { - w.writeOutBits() - } -} - -// writeOutBits will write bits to the buffer. -func (w *huffmanBitWriter) writeOutBits() { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - - // We over-write, but faster... - binary.LittleEndian.PutUint64(w.bytes[n:], bits) - n += 6 - - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) - n = 0 - } - - w.nbytes = n -} - -// Write the header of a dynamic Huffman block to the output stream. -// -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen -func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { - if w.err != nil { - return - } - var firstBits int32 = 4 - if isEof { - firstBits = 5 - } - w.writeBits(firstBits, 3) - w.writeBits(int32(numLiterals-257), 5) - w.writeBits(int32(numOffsets-1), 5) - w.writeBits(int32(numCodegens-4), 4) - - for i := 0; i < numCodegens; i++ { - value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) - w.writeBits(int32(value), 3) - } - - i := 0 - for { - var codeWord = uint32(w.codegen[i]) - i++ - if codeWord == badCode { - break - } - w.writeCode(w.codegenEncoding.codes[codeWord]) - - switch codeWord { - case 16: - w.writeBits(int32(w.codegen[i]), 2) - i++ - case 17: - w.writeBits(int32(w.codegen[i]), 3) - i++ - case 18: - w.writeBits(int32(w.codegen[i]), 7) - i++ - } - } -} - -// 
writeStoredHeader will write a stored header. -// If the stored block is only used for EOF, -// it is replaced with a fixed huffman block. -func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. - if length == 0 && isEof { - w.writeFixedHeader(isEof) - // EOB: 7 bits, value: 0 - w.writeBits(0, 7) - w.flush() - return - } - - var flag int32 - if isEof { - flag = 1 - } - w.writeBits(flag, 3) - w.flush() - w.writeBits(int32(length), 16) - w.writeBits(int32(^uint16(length)), 16) -} - -func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // Indicate that we are a fixed Huffman block - var value int32 = 2 - if isEof { - value = 3 - } - w.writeBits(value, 3) -} - -// writeBlock will write a block of tokens with the smallest encoding. -// The original input can be supplied, and if the huffman encoded data -// is larger than the original bytes, the data will be written as a -// stored block. -// If the input is nil, the tokens will always be Huffman encoded. -func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { - if w.err != nil { - return - } - - tokens.AddEOB() - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate() - var extraBits int - storedSize, storable := w.storedSize(input) - if storable { - extraBits = w.extraBitSize() - } - - // Figure out smallest code. - // Fixed Huffman baseline. 
- var literalEncoding = fixedLiteralEncoding - var offsetEncoding = fixedOffsetEncoding - var size = math.MaxInt32 - if tokens.n < maxPredefinedTokens { - size = w.fixedSize(extraBits) - } - - // Dynamic Huffman? - var numCodegens int - - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - if dynamicSize < size { - size = dynamicSize - literalEncoding = w.literalEncoding - offsetEncoding = w.offsetEncoding - } - - // Stored bytes? - if storable && storedSize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Huffman. - if literalEncoding == fixedLiteralEncoding { - w.writeFixedHeader(eof) - } else { - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - } - - // Write the tokens. - w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) -} - -// writeBlockDynamic encodes a block using a dynamic Huffman table. -// This should be used if the symbols used have a disproportionate -// histogram distribution. -// If input is supplied and the compression savings are below 1/16th of the -// input size the block is stored. -func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - sync = sync || eof - if sync { - tokens.AddEOB() - } - - // We cannot reuse pure huffman table, and must mark as EOF. - if (w.lastHuffMan || eof) && w.lastHeader > 0 { - // We will not try to reuse. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } - - // fillReuse enables filling of empty values. - // This will make encodings always reusable without testing. 
- // However, this does not appear to benefit on most cases. - const fillReuse = false - - // Check if we can reuse... - if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - numLiterals, numOffsets := w.indexTokens(tokens, !sync) - extraBits := 0 - ssize, storable := w.storedSize(input) - - const usePrefs = true - if storable || w.lastHeader > 0 { - extraBits = w.extraBitSize() - } - - var size int - - // Check if we should reuse. - if w.lastHeader > 0 { - // Estimate size for using a new table. - // Use the previous header size as the best estimate. - newSize := w.lastHeader + tokens.EstimatedBits() - newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty - - // The estimated size is calculated as an optimal table. - // We add a penalty to make it more realistic and re-use a bit more. - reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits - - // Check if a new table is better. - if newSize < reuseSize { - // Write the EOB we owe. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - size = newSize - w.lastHeader = 0 - } else { - size = reuseSize - } - - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - // Check if we get a reasonable size decrease. 
- if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - - // We want a new block/table - if w.lastHeader == 0 { - if fillReuse && !sync { - w.fillTokens() - numLiterals, numOffsets = maxNumLit, maxNumDist - } else { - w.literalFreq[endBlockMarker] = 1 - } - - w.generate() - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - - var numCodegens int - if fillReuse && !sync { - // Reindex for accurate size... - w.indexTokens(tokens, true) - } - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - // Store predefined, if we don't get a reasonable improvement. - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - - if storable && ssize <= size { - // Store bytes, if we don't get an improvement. - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Write Huffman table. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - if !sync { - w.lastHeader, _ = w.headerSize() - } - w.lastHuffMan = false - } - - if sync { - w.lastHeader = 0 - } - // Write the tokens. 
- w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) -} - -func (w *huffmanBitWriter) fillTokens() { - for i, v := range w.literalFreq[:literalCount] { - if v == 0 { - w.literalFreq[i] = 1 - } - } - for i, v := range w.offsetFreq[:offsetCodeCount] { - if v == 0 { - w.offsetFreq[i] = 1 - } - } -} - -// indexTokens indexes a slice of tokens, and updates -// literalFreq and offsetFreq, and generates literalEncoding -// and offsetEncoding. -// The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { - //copy(w.literalFreq[:], t.litHist[:]) - *(*[256]uint16)(w.literalFreq[:]) = t.litHist - //copy(w.literalFreq[256:], t.extraHist[:]) - *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist - w.offsetFreq = t.offHist - - if t.n == 0 { - return - } - if filled { - return maxNumLit, maxNumDist - } - // get the number of literals - numLiterals = len(w.literalFreq) - for w.literalFreq[numLiterals-1] == 0 { - numLiterals-- - } - // get the number of offsets - numOffsets = len(w.offsetFreq) - for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { - numOffsets-- - } - if numOffsets == 0 { - // We haven't found a single match. If we want to go with the dynamic encoding, - // we should count at least one offset to be sure that the offset huffman tree could be encoded. - w.offsetFreq[0] = 1 - numOffsets = 1 - } - return -} - -func (w *huffmanBitWriter) generate() { - w.literalEncoding.generate(w.literalFreq[:literalCount], 15) - w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeTokens writes a slice of tokens to the output. -// codes for literal and offset encoding must be supplied. -func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { - if w.err != nil { - return - } - if len(tokens) == 0 { - return - } - - // Only last token should be endBlockMarker. 
- var deferEOB bool - if tokens[len(tokens)-1] == endBlockMarker { - tokens = tokens[:len(tokens)-1] - deferEOB = true - } - - // Create slices up to the next power of two to avoid bounds checks. - lits := leCodes[:256] - offs := oeCodes[:32] - lengths := leCodes[lengthCodesStart:] - lengths = lengths[:32] - - // Go 1.16 LOVES having these on stack. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - for _, t := range tokens { - if t < 256 { - //w.writeCode(lits[t.literal()]) - c := lits[t] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - continue - } - - // Write the length - length := t.length() - lengthCode := lengthCode(length) & 31 - if false { - w.writeCode(lengths[lengthCode]) - } else { - // inlined - c := lengths[lengthCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if lengthCode >= lengthExtraBitsMinCode { - extraLengthBits := lengthExtraBits[lengthCode] - //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode]) - bits |= uint64(extraLength) << (nbits & 63) - nbits += extraLengthBits - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = 
w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - // Write the offset - offset := t.offset() - offsetCode := (offset >> 16) & 31 - if false { - w.writeCode(offs[offsetCode]) - } else { - // inlined - c := offs[offsetCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if offsetCode >= offsetExtraBitsMinCode { - offsetComb := offsetCombined[offsetCode] - //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) - nbits += uint8(offsetComb) - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if deferEOB { - w.writeCode(leCodes[endBlockMarker]) - } -} - -// huffOffset is a static offset encoder used for huffman only encoding. -// It can be reused since we will not be encoding offset values. -var huffOffset *huffmanEncoder - -func init() { - w := newHuffmanBitWriter(nil) - w.offsetFreq[0] = 1 - huffOffset = newHuffmanEncoder(offsetCodeCount) - huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeBlockHuff encodes a block of bytes as either -// Huffman encoded literals or uncompressed bytes if the -// results only gains very little from compression. 
-func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - // Clear histogram - for i := range w.literalFreq[:] { - w.literalFreq[i] = 0 - } - if !w.lastHuffMan { - for i := range w.offsetFreq[:] { - w.offsetFreq[i] = 0 - } - } - - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 - - // Add everything as literals - // We have to estimate the header size. - // Assume header is around 70 bytes: - // https://stackoverflow.com/a/25454430 - const guessHeaderSizeBits = 70 * 8 - histogram(input, w.literalFreq[:numLiterals]) - ssize, storable := w.storedSize(input) - if storable && len(input) > 1024 { - // Quick check for incompressible content. - abs := float64(0) - avg := float64(len(input)) / 256 - max := float64(len(input) * 2) - for _, v := range w.literalFreq[:256] { - diff := float64(v) - avg - abs += diff * diff - if abs > max { - break - } - } - if abs < max { - if debugDeflate { - fmt.Println("stored", abs, "<", max) - } - // No chance we can compress this... - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - w.literalFreq[endBlockMarker] = 1 - w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) - estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) - if estBits < math.MaxInt32 { - estBits += w.lastHeader - if w.lastHeader == 0 { - estBits += guessHeaderSizeBits - } - estBits += estBits >> w.logNewTablePenalty - } - - // Store bytes, if we don't get a reasonable improvement. 
- if storable && ssize <= estBits { - if debugDeflate { - fmt.Println("stored,", ssize, "<=", estBits) - } - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - if w.lastHeader > 0 { - reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) - - if estBits < reuseSize { - if debugDeflate { - fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") - } - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } else if debugDeflate { - fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) - } - } - - count := 0 - if w.lastHeader == 0 { - // Use the temp encoding, so swap. - w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - numCodegens := w.codegens() - - // Huffman. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHuffMan = true - w.lastHeader, _ = w.headerSize() - if debugDeflate { - count += w.lastHeader - fmt.Println("header:", count/8) - } - } - - encoding := w.literalEncoding.codes[:256] - // Go 1.16 LOVES having these on stack. At least 1.5x the speed. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - if debugDeflate { - count -= int(nbytes)*8 + int(nbits) - } - // Unroll, write 3 codes/loop. - // Fastest number of unrolls. - for len(input) > 3 { - // We must have at least 48 bits free. 
- if nbits >= 8 { - n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - bits >>= (n * 8) & 63 - nbits -= n * 8 - nbytes += n - } - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - a, b := encoding[input[0]], encoding[input[1]] - bits |= a.code64() << (nbits & 63) - bits |= b.code64() << ((nbits + a.len()) & 63) - c := encoding[input[2]] - nbits += b.len() + a.len() - bits |= c.code64() << (nbits & 63) - nbits += c.len() - input = input[3:] - } - - // Remaining... - for _, t := range input { - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= c.code64() << (nbits & 63) - - nbits += c.len() - if debugDeflate { - count += int(c.len()) - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if debugDeflate { - nb := count + int(nbytes)*8 + int(nbits) - fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") - } - // Flush if needed to have space. - if w.nbits >= 48 { - w.writeOutBits() - } - - if eof || sync { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_code.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_code.go deleted file mode 100644 index be7b58b..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright 2009 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "math" - "math/bits" -) - -const ( - maxBitsLimit = 16 - // number of valid literals - literalCount = 286 -) - -// hcode is a huffman code with a bit code and bit length. -type hcode uint32 - -func (h hcode) len() uint8 { - return uint8(h) -} - -func (h hcode) code64() uint64 { - return uint64(h >> 8) -} - -func (h hcode) zero() bool { - return h == 0 -} - -type huffmanEncoder struct { - codes []hcode - bitCount [17]int32 - - // Allocate a reusable buffer with the longest possible frequency table. - // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. - // The largest of these is literalCount, so we allocate for that case. - freqcache [literalCount + 1]literalNode -} - -type literalNode struct { - literal uint16 - freq uint16 -} - -// A levelInfo describes the state of the constructed tree for a given depth. -type levelInfo struct { - // Our level. for better printing - level int32 - - // The frequency of the last node at this level - lastFreq int32 - - // The frequency of the next character to add to this level - nextCharFreq int32 - - // The frequency of the next pair (from level below) to add to this level. - // Only valid if the "needed" value of the next lower level is 0. - nextPairFreq int32 - - // The number of chains remaining to generate for this level before moving - // up to the next level - needed int32 -} - -// set sets the code and length of an hcode. 
-func (h *hcode) set(code uint16, length uint8) { - *h = hcode(length) | (hcode(code) << 8) -} - -func newhcode(code uint16, length uint8) hcode { - return hcode(length) | (hcode(code) << 8) -} - -func reverseBits(number uint16, bitLength byte) uint16 { - return bits.Reverse16(number << ((16 - bitLength) & 15)) -} - -func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } - -func newHuffmanEncoder(size int) *huffmanEncoder { - // Make capacity to next power of two. - c := uint(bits.Len32(uint32(size - 1))) - return &huffmanEncoder{codes: make([]hcode, size, 1<= 3 -// The cases of 0, 1, and 2 literals are handled by special case code. -// -// list An array of the literals with non-zero frequencies -// -// and their associated frequencies. The array is in order of increasing -// frequency, and has as its last element a special element with frequency -// MaxInt32 -// -// maxBits The maximum number of bits that should be used to encode any literal. -// -// Must be less than 16. -// -// return An integer array in which array[i] indicates the number of literals -// -// that should be encoded in i bits. -func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { - if maxBits >= maxBitsLimit { - panic("flate: maxBits too large") - } - n := int32(len(list)) - list = list[0 : n+1] - list[n] = maxNode() - - // The tree can't have greater depth than n - 1, no matter what. This - // saves a little bit of work in some small cases - if maxBits > n-1 { - maxBits = n - 1 - } - - // Create information about each of the levels. - // A bogus "Level 0" whose sole purpose is so that - // level1.prev.needed==0. This makes level1.nextPairFreq - // be a legitimate value that never gets chosen. - var levels [maxBitsLimit]levelInfo - // leafCounts[i] counts the number of literals at the left - // of ancestors of the rightmost node at level i. - // leafCounts[i][j] is the number of literals at the left - // of the level j ancestor. 
- var leafCounts [maxBitsLimit][maxBitsLimit]int32 - - // Descending to only have 1 bounds check. - l2f := int32(list[2].freq) - l1f := int32(list[1].freq) - l0f := int32(list[0].freq) + int32(list[1].freq) - - for level := int32(1); level <= maxBits; level++ { - // For every level, the first two items are the first two characters. - // We initialize the levels as if we had already figured this out. - levels[level] = levelInfo{ - level: level, - lastFreq: l1f, - nextCharFreq: l2f, - nextPairFreq: l0f, - } - leafCounts[level][level] = 2 - if level == 1 { - levels[level].nextPairFreq = math.MaxInt32 - } - } - - // We need a total of 2*n - 2 items at top level and have already generated 2. - levels[maxBits].needed = 2*n - 4 - - level := uint32(maxBits) - for level < 16 { - l := &levels[level] - if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { - // We've run out of both leafs and pairs. - // End all calculations for this level. - // To make sure we never come back to this level or any lower level, - // set nextPairFreq impossibly large. - l.needed = 0 - levels[level+1].nextPairFreq = math.MaxInt32 - level++ - continue - } - - prevFreq := l.lastFreq - if l.nextCharFreq < l.nextPairFreq { - // The next item on this row is a leaf node. - n := leafCounts[level][level] + 1 - l.lastFreq = l.nextCharFreq - // Lower leafCounts are the same of the previous node. - leafCounts[level][level] = n - e := list[n] - if e.literal < math.MaxUint16 { - l.nextCharFreq = int32(e.freq) - } else { - l.nextCharFreq = math.MaxInt32 - } - } else { - // The next item on this row is a pair from the previous row. - // nextPairFreq isn't valid until we generate two - // more values in the level below - l.lastFreq = l.nextPairFreq - // Take leaf counts from the lower level, except counts[level] remains the same. 
- if true { - save := leafCounts[level][level] - leafCounts[level] = leafCounts[level-1] - leafCounts[level][level] = save - } else { - copy(leafCounts[level][:level], leafCounts[level-1][:level]) - } - levels[l.level-1].needed = 2 - } - - if l.needed--; l.needed == 0 { - // We've done everything we need to do for this level. - // Continue calculating one level up. Fill in nextPairFreq - // of that level with the sum of the two nodes we've just calculated on - // this level. - if l.level == maxBits { - // All done! - break - } - levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq - level++ - } else { - // If we stole from below, move down temporarily to replenish it. - for levels[level-1].needed > 0 { - level-- - } - } - } - - // Somethings is wrong if at the end, the top level is null or hasn't used - // all of the leaves. - if leafCounts[maxBits][maxBits] != n { - panic("leafCounts[maxBits][maxBits] != n") - } - - bitCount := h.bitCount[:maxBits+1] - bits := 1 - counts := &leafCounts[maxBits] - for level := maxBits; level > 0; level-- { - // chain.leafCount gives the number of literals requiring at least "bits" - // bits to encode. - bitCount[bits] = counts[level] - counts[level-1] - bits++ - } - return bitCount -} - -// Look at the leaves and assign them a bit count and an encoding as specified -// in RFC 1951 3.2.2 -func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { - code := uint16(0) - for n, bits := range bitCount { - code <<= 1 - if n == 0 || bits == 0 { - continue - } - // The literals list[len(list)-bits] .. list[len(list)-bits] - // are encoded using "bits" bits, and get the values - // code, code + 1, .... The code values are - // assigned in literal order (not frequency order). 
- chunk := list[len(list)-int(bits):] - - sortByLiteral(chunk) - for _, node := range chunk { - h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) - code++ - } - list = list[0 : len(list)-int(bits)] - } -} - -// Update this Huffman Code object to be the minimum code for the specified frequency count. -// -// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. -// maxBits The maximum number of bits to use for any literal. -func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { - list := h.freqcache[:len(freq)+1] - codes := h.codes[:len(freq)] - // Number of non-zero literals - count := 0 - // Set list to be the set of all non-zero literals and their frequencies - for i, f := range freq { - if f != 0 { - list[count] = literalNode{uint16(i), f} - count++ - } else { - codes[i] = 0 - } - } - list[count] = literalNode{} - - list = list[:count] - if count <= 2 { - // Handle the small cases here, because they are awkward for the general case code. With - // two or fewer literals, everything has bit length 1. - for i, node := range list { - // "list" is in order of increasing literal value. - h.codes[node.literal].set(uint16(i), 1) - } - return - } - sortByFreq(list) - - // Get the number of literals for each bit count - bitCount := h.bitCounts(list, maxBits) - // And do the assignment - h.assignEncodingAndSize(bitCount, list) -} - -// atLeastOne clamps the result between 1 and 15. -func atLeastOne(v float32) float32 { - if v < 1 { - return 1 - } - if v > 15 { - return 15 - } - return v -} - -func histogram(b []byte, h []uint16) { - if true && len(b) >= 8<<10 { - // Split for bigger inputs - histogramSplit(b, h) - } else { - h = h[:256] - for _, t := range b { - h[t]++ - } - } -} - -func histogramSplit(b []byte, h []uint16) { - // Tested, and slightly faster than 2-way. - // Writing to separate arrays and combining is also slightly slower. 
- h = h[:256] - for len(b)&3 != 0 { - h[b[0]]++ - b = b[1:] - } - n := len(b) / 4 - x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] - y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] - for i, t := range x { - v0 := &h[t] - v1 := &h[y[i]] - v3 := &h[w[i]] - v2 := &h[z[i]] - *v0++ - *v1++ - *v2++ - *v3++ - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go deleted file mode 100644 index 6c05ba8..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. -func sortByFreq(data []literalNode) { - n := len(data) - quickSortByFreq(data, 0, n, maxDepth(n)) -} - -func quickSortByFreq(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivotByFreq(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). 
- if mlo-a < b-mhi { - quickSortByFreq(data, a, mlo, maxDepth) - a = mhi // i.e., quickSortByFreq(data, mhi, b) - } else { - quickSortByFreq(data, mhi, b, maxDepth) - b = mlo // i.e., quickSortByFreq(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSortByFreq(data, a, b) - } -} - -func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. - s := (hi - lo) / 8 - medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) - medianOfThreeSortByFreq(data, m, m-s, m+s) - medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThreeSortByFreq(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { - } - b := a - for { - for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot - } - for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). 
- // Let's be a bit more conservative, and set border to 5. - protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot - } - for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSortByFreq(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// quickSortByFreq, loosely following Bentley and McIlroy, -// 
``Engineering a Sort Function,'' SP&E November 1993. - -// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go deleted file mode 100644 index 93f1aea..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. -func sortByLiteral(data []literalNode) { - n := len(data) - quickSort(data, 0, n, maxDepth(n)) -} - -func quickSort(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivot(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). 
- if mlo-a < b-mhi { - quickSort(data, a, mlo, maxDepth) - a = mhi // i.e., quickSort(data, mhi, b) - } else { - quickSort(data, mhi, b, maxDepth) - b = mlo // i.e., quickSort(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].literal < data[i-6].literal { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSort(data, a, b) - } -} -func heapSort(data []literalNode, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDown(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDown(data, lo, i, first) - } -} - -// siftDown implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDown(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && data[first+child].literal < data[first+child+1].literal { - child++ - } - if data[first+root].literal > data[first+child].literal { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} -func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. 
- s := (hi - lo) / 8 - medianOfThree(data, lo, lo+s, lo+2*s) - medianOfThree(data, m, m-s, m+s) - medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThree(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && data[a].literal < data[pivot].literal; a++ { - } - b := a - for { - for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot - } - for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. - protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].literal > data[pivot].literal { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot - } - for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - 
data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSort(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && data[j].literal < data[j-1].literal; j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// maxDepth returns a threshold at which quicksort should switch -// to heapsort. It returns 2*ceil(lg(n+1)). -func maxDepth(n int) int { - var depth int - for i := n; i > 0; i >>= 1 { - depth++ - } - return depth * 2 -} - -// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThree(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].literal < data[m1].literal { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate.go deleted file mode 100644 index 2f410d6..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate.go +++ /dev/null @@ -1,829 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package flate implements the DEFLATE compressed data format, described in -// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file -// formats. 
-package flate - -import ( - "bufio" - "compress/flate" - "fmt" - "io" - "math/bits" - "sync" -) - -const ( - maxCodeLen = 16 // max length of Huffman code - maxCodeLenMask = 15 // mask for max length of Huffman code - // The next three numbers come from the RFC section 3.2.7, with the - // additional proviso in section 3.2.5 which implies that distance codes - // 30 and 31 should never occur in compressed data. - maxNumLit = 286 - maxNumDist = 30 - numCodes = 19 // number of codes in Huffman meta-code - - debugDecode = false -) - -// Value of length - 3 and extra bits. -type lengthExtra struct { - length, extra uint8 -} - -var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// Initialize the fixedHuffmanDecoder only once upon first use. 
-var fixedOnce sync.Once -var fixedHuffmanDecoder huffmanDecoder - -// A CorruptInputError reports the presence of corrupt input at a given offset. -type CorruptInputError = flate.CorruptInputError - -// An InternalError reports an error in the flate code itself. -type InternalError string - -func (e InternalError) Error() string { return "flate: internal error: " + string(e) } - -// A ReadError reports an error encountered while reading input. -// -// Deprecated: No longer returned. -type ReadError = flate.ReadError - -// A WriteError reports an error encountered while writing output. -// -// Deprecated: No longer returned. -type WriteError = flate.WriteError - -// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to -// to switch to a new underlying Reader. This permits reusing a ReadCloser -// instead of allocating a new one. -type Resetter interface { - // Reset discards any buffered data and resets the Resetter as if it was - // newly initialized with the given reader. - Reset(r io.Reader, dict []byte) error -} - -// The data structure for decoding Huffman tables is based on that of -// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), -// For codes smaller than the table width, there are multiple entries -// (each combination of trailing bits has the same value). For codes -// larger than the table width, the table contains a link to an overflow -// table. The width of each entry in the link table is the maximum code -// size minus the chunk width. -// -// Note that you can do a lookup in the table even without all bits -// filled. Since the extra bits are zero, and the DEFLATE Huffman codes -// have the property that shorter codes come before longer ones, the -// bit length estimate in the result is a lower bound on the actual -// number of bits. 
-// -// See the following: -// http://www.gzip.org/algorithm.txt - -// chunk & 15 is number of bits -// chunk >> 4 is value, including table link - -const ( - huffmanChunkBits = 9 - huffmanNumChunks = 1 << huffmanChunkBits - huffmanCountMask = 15 - huffmanValueShift = 4 -) - -type huffmanDecoder struct { - maxRead int // the maximum number of bits we can read and not overread - chunks *[huffmanNumChunks]uint16 // chunks as described above - links [][]uint16 // overflow links - linkMask uint32 // mask the width of the link table -} - -// Initialize Huffman decoding tables from array of code lengths. -// Following this function, h is guaranteed to be initialized into a complete -// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a -// degenerate case where the tree has only a single symbol with length 1. Empty -// trees are permitted. -func (h *huffmanDecoder) init(lengths []int) bool { - // Sanity enables additional runtime tests during Huffman - // table construction. It's intended to be used during - // development to supplement the currently ad-hoc unit tests. - const sanity = false - - if h.chunks == nil { - h.chunks = new([huffmanNumChunks]uint16) - } - - if h.maxRead != 0 { - *h = huffmanDecoder{chunks: h.chunks, links: h.links} - } - - // Count number of codes of each length, - // compute maxRead and max length. - var count [maxCodeLen]int - var min, max int - for _, n := range lengths { - if n == 0 { - continue - } - if min == 0 || n < min { - min = n - } - if n > max { - max = n - } - count[n&maxCodeLenMask]++ - } - - // Empty tree. The decompressor.huffSym function will fail later if the tree - // is used. Technically, an empty tree is only valid for the HDIST tree and - // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree - // is guaranteed to fail since it will attempt to use the tree to decode the - // codes for the HLIT and HDIST trees. 
Similarly, an empty HLIT tree is - // guaranteed to fail later since the compressed data section must be - // composed of at least one symbol (the end-of-block marker). - if max == 0 { - return true - } - - code := 0 - var nextcode [maxCodeLen]int - for i := min; i <= max; i++ { - code <<= 1 - nextcode[i&maxCodeLenMask] = code - code += count[i&maxCodeLenMask] - } - - // Check that the coding is complete (i.e., that we've - // assigned all 2-to-the-max possible bit sequences). - // Exception: To be compatible with zlib, we also need to - // accept degenerate single-code codings. See also - // TestDegenerateHuffmanCoding. - if code != 1< huffmanChunkBits { - numLinks := 1 << (uint(max) - huffmanChunkBits) - h.linkMask = uint32(numLinks - 1) - - // create link tables - link := nextcode[huffmanChunkBits+1] >> 1 - if cap(h.links) < huffmanNumChunks-link { - h.links = make([][]uint16, huffmanNumChunks-link) - } else { - h.links = h.links[:huffmanNumChunks-link] - } - for j := uint(link); j < huffmanNumChunks; j++ { - reverse := int(bits.Reverse16(uint16(j))) - reverse >>= uint(16 - huffmanChunkBits) - off := j - uint(link) - if sanity && h.chunks[reverse] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[reverse] = uint16(off<>= uint(16 - n) - if n <= huffmanChunkBits { - for off := reverse; off < len(h.chunks); off += 1 << uint(n) { - // We should never need to overwrite - // an existing chunk. Also, 0 is - // never a valid chunk, because the - // lower 4 "count" bits should be - // between 1 and 15. - if sanity && h.chunks[off] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[off] = chunk - } - } else { - j := reverse & (huffmanNumChunks - 1) - if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { - // Longer codes should have been - // associated with a link table above. 
- panic("impossible: not an indirect chunk") - } - value := h.chunks[j] >> huffmanValueShift - linktab := h.links[value] - reverse >>= huffmanChunkBits - for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { - if sanity && linktab[off] != 0 { - panic("impossible: overwriting existing chunk") - } - linktab[off] = chunk - } - } - } - - if sanity { - // Above we've sanity checked that we never overwrote - // an existing entry. Here we additionally check that - // we filled the tables completely. - for i, chunk := range h.chunks { - if chunk == 0 { - // As an exception, in the degenerate - // single-code case, we allow odd - // chunks to be missing. - if code == 1 && i%2 == 1 { - continue - } - panic("impossible: missing chunk") - } - } - for _, linktab := range h.links { - for _, chunk := range linktab { - if chunk == 0 { - panic("impossible: missing chunk") - } - } - } - } - - return true -} - -// Reader is the actual read interface needed by NewReader. -// If the passed in io.Reader does not also have ReadByte, -// the NewReader will introduce its own buffering. -type Reader interface { - io.Reader - io.ByteReader -} - -type step uint8 - -const ( - copyData step = iota + 1 - nextBlock - huffmanBytesBuffer - huffmanBytesReader - huffmanBufioReader - huffmanStringsReader - huffmanGenericReader -) - -// Decompress state. -type decompressor struct { - // Input source. - r Reader - roffset int64 - - // Huffman decoders for literal/length, distance. - h1, h2 huffmanDecoder - - // Length arrays used to define Huffman codes. - bits *[maxNumLit + maxNumDist]int - codebits *[numCodes]int - - // Output history, buffer. - dict dictDecoder - - // Next step in the decompression, - // and decompression state. - step step - stepState int - err error - toRead []byte - hl, hd *huffmanDecoder - copyLen int - copyDist int - - // Temporary buffer (avoids repeated allocation). - buf [4]byte - - // Input bits, in top of b. 
- b uint32 - - nb uint - final bool -} - -func (f *decompressor) nextBlock() { - for f.nb < 1+2 { - if f.err = f.moreBits(); f.err != nil { - return - } - } - f.final = f.b&1 == 1 - f.b >>= 1 - typ := f.b & 3 - f.b >>= 2 - f.nb -= 1 + 2 - switch typ { - case 0: - f.dataBlock() - if debugDecode { - fmt.Println("stored block") - } - case 1: - // compressed, fixed Huffman tables - f.hl = &fixedHuffmanDecoder - f.hd = nil - f.huffmanBlockDecoder() - if debugDecode { - fmt.Println("predefinied huffman block") - } - case 2: - // compressed, dynamic Huffman tables - if f.err = f.readHuffman(); f.err != nil { - break - } - f.hl = &f.h1 - f.hd = &f.h2 - f.huffmanBlockDecoder() - if debugDecode { - fmt.Println("dynamic huffman block") - } - default: - // 3 is reserved. - if debugDecode { - fmt.Println("reserved data block encountered") - } - f.err = CorruptInputError(f.roffset) - } -} - -func (f *decompressor) Read(b []byte) (int, error) { - for { - if len(f.toRead) > 0 { - n := copy(b, f.toRead) - f.toRead = f.toRead[n:] - if len(f.toRead) == 0 { - return n, f.err - } - return n, nil - } - if f.err != nil { - return 0, f.err - } - - f.doStep() - - if f.err != nil && len(f.toRead) == 0 { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - } - } -} - -// WriteTo implements the io.WriteTo interface for io.Copy and friends. 
-func (f *decompressor) WriteTo(w io.Writer) (int64, error) { - total := int64(0) - flushed := false - for { - if len(f.toRead) > 0 { - n, err := w.Write(f.toRead) - total += int64(n) - if err != nil { - f.err = err - return total, err - } - if n != len(f.toRead) { - return total, io.ErrShortWrite - } - f.toRead = f.toRead[:0] - } - if f.err != nil && flushed { - if f.err == io.EOF { - return total, nil - } - return total, f.err - } - if f.err == nil { - f.doStep() - } - if len(f.toRead) == 0 && f.err != nil && !flushed { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - flushed = true - } - } -} - -func (f *decompressor) Close() error { - if f.err == io.EOF { - return nil - } - return f.err -} - -// RFC 1951 section 3.2.7. -// Compression with dynamic Huffman codes - -var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -func (f *decompressor) readHuffman() error { - // HLIT[5], HDIST[5], HCLEN[4]. - for f.nb < 5+5+4 { - if err := f.moreBits(); err != nil { - return err - } - } - nlit := int(f.b&0x1F) + 257 - if nlit > maxNumLit { - if debugDecode { - fmt.Println("nlit > maxNumLit", nlit) - } - return CorruptInputError(f.roffset) - } - f.b >>= 5 - ndist := int(f.b&0x1F) + 1 - if ndist > maxNumDist { - if debugDecode { - fmt.Println("ndist > maxNumDist", ndist) - } - return CorruptInputError(f.roffset) - } - f.b >>= 5 - nclen := int(f.b&0xF) + 4 - // numCodes is 19, so nclen is always valid. - f.b >>= 4 - f.nb -= 5 + 5 + 4 - - // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. 
- for i := 0; i < nclen; i++ { - for f.nb < 3 { - if err := f.moreBits(); err != nil { - return err - } - } - f.codebits[codeOrder[i]] = int(f.b & 0x7) - f.b >>= 3 - f.nb -= 3 - } - for i := nclen; i < len(codeOrder); i++ { - f.codebits[codeOrder[i]] = 0 - } - if !f.h1.init(f.codebits[0:]) { - if debugDecode { - fmt.Println("init codebits failed") - } - return CorruptInputError(f.roffset) - } - - // HLIT + 257 code lengths, HDIST + 1 code lengths, - // using the code length Huffman code. - for i, n := 0, nlit+ndist; i < n; { - x, err := f.huffSym(&f.h1) - if err != nil { - return err - } - if x < 16 { - // Actual length. - f.bits[i] = x - i++ - continue - } - // Repeat previous length or zero. - var rep int - var nb uint - var b int - switch x { - default: - return InternalError("unexpected length code") - case 16: - rep = 3 - nb = 2 - if i == 0 { - if debugDecode { - fmt.Println("i==0") - } - return CorruptInputError(f.roffset) - } - b = f.bits[i-1] - case 17: - rep = 3 - nb = 3 - b = 0 - case 18: - rep = 11 - nb = 7 - b = 0 - } - for f.nb < nb { - if err := f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits:", err) - } - return err - } - } - rep += int(f.b & uint32(1<<(nb®SizeMaskUint32)-1)) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb - if i+rep > n { - if debugDecode { - fmt.Println("i+rep > n", i, rep, n) - } - return CorruptInputError(f.roffset) - } - for j := 0; j < rep; j++ { - f.bits[i] = b - i++ - } - } - - if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { - if debugDecode { - fmt.Println("init2 failed") - } - return CorruptInputError(f.roffset) - } - - // As an optimization, we can initialize the maxRead bits to read at a time - // for the HLIT tree to the length of the EOB marker since we know that - // every block must terminate with one. This preserves the property that - // we never read any extra bytes after the end of the DEFLATE stream. 
- if f.h1.maxRead < f.bits[endBlockMarker] { - f.h1.maxRead = f.bits[endBlockMarker] - } - if !f.final { - // If not the final block, the smallest block possible is - // a predefined table, BTYPE=01, with a single EOB marker. - // This will take up 3 + 7 bits. - f.h1.maxRead += 10 - } - - return nil -} - -// Copy a single uncompressed data block from input to output. -func (f *decompressor) dataBlock() { - // Uncompressed. - // Discard current half-byte. - left := (f.nb) & 7 - f.nb -= left - f.b >>= left - - offBytes := f.nb >> 3 - // Unfilled values will be overwritten. - f.buf[0] = uint8(f.b) - f.buf[1] = uint8(f.b >> 8) - f.buf[2] = uint8(f.b >> 16) - f.buf[3] = uint8(f.b >> 24) - - f.roffset += int64(offBytes) - f.nb, f.b = 0, 0 - - // Length then ones-complement of length. - nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) - f.roffset += int64(nr) - if err != nil { - f.err = noEOF(err) - return - } - n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 - nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 - if nn != ^n { - if debugDecode { - ncomp := ^n - fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) - } - f.err = CorruptInputError(f.roffset) - return - } - - if n == 0 { - f.toRead = f.dict.readFlush() - f.finishBlock() - return - } - - f.copyLen = int(n) - f.copyData() -} - -// copyData copies f.copyLen bytes from the underlying reader into f.hist. -// It pauses for reads when f.hist is full. 
-func (f *decompressor) copyData() { - buf := f.dict.writeSlice() - if len(buf) > f.copyLen { - buf = buf[:f.copyLen] - } - - cnt, err := io.ReadFull(f.r, buf) - f.roffset += int64(cnt) - f.copyLen -= cnt - f.dict.writeMark(cnt) - if err != nil { - f.err = noEOF(err) - return - } - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = copyData - return - } - f.finishBlock() -} - -func (f *decompressor) finishBlock() { - if f.final { - if f.dict.availRead() > 0 { - f.toRead = f.dict.readFlush() - } - f.err = io.EOF - } - f.step = nextBlock -} - -func (f *decompressor) doStep() { - switch f.step { - case copyData: - f.copyData() - case nextBlock: - f.nextBlock() - case huffmanBytesBuffer: - f.huffmanBytesBuffer() - case huffmanBytesReader: - f.huffmanBytesReader() - case huffmanBufioReader: - f.huffmanBufioReader() - case huffmanStringsReader: - f.huffmanStringsReader() - case huffmanGenericReader: - f.huffmanGenericReader() - default: - panic("BUG: unexpected step state") - } -} - -// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. -func noEOF(e error) error { - if e == io.EOF { - return io.ErrUnexpectedEOF - } - return e -} - -func (f *decompressor) moreBits() error { - c, err := f.r.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << (f.nb & regSizeMaskUint32) - f.nb += 8 - return nil -} - -// Read the next Huffman-encoded symbol from f according to h. -func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(h.maxRead) - // Optimization. 
Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - return 0, noEOF(err) - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := h.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return 0, f.err - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - return int(chunk >> huffmanValueShift), nil - } - } -} - -func makeReader(r io.Reader) Reader { - if rr, ok := r.(Reader); ok { - return rr - } - return bufio.NewReader(r) -} - -func fixedHuffmanDecoderInit() { - fixedOnce.Do(func() { - // These come from the RFC section 3.2.6. - var bits [288]int - for i := 0; i < 144; i++ { - bits[i] = 8 - } - for i := 144; i < 256; i++ { - bits[i] = 9 - } - for i := 256; i < 280; i++ { - bits[i] = 7 - } - for i := 280; i < 288; i++ { - bits[i] = 8 - } - fixedHuffmanDecoder.init(bits[:]) - }) -} - -func (f *decompressor) Reset(r io.Reader, dict []byte) error { - *f = decompressor{ - r: makeReader(r), - bits: f.bits, - codebits: f.codebits, - h1: f.h1, - h2: f.h2, - dict: f.dict, - step: nextBlock, - } - f.dict.init(maxMatchOffset, dict) - return nil -} - -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. 
-// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, nil) - return &f -} - -// NewReaderDict is like NewReader but initializes the reader -// with a preset dictionary. The returned Reader behaves as if -// the uncompressed data stream started with the given dictionary, -// which has already been read. NewReaderDict is typically used -// to read data compressed by NewWriterDict. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, dict) - return &f -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate_gen.go deleted file mode 100644 index 2b2f993..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ /dev/null @@ -1,1283 +0,0 @@ -// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( - "bufio" - "bytes" - "fmt" - "math/bits" - "strings" -) - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesBuffer() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Buffer) - - // Optimization. 
Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesBuffer - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) 
- for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. 
- { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesBuffer // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = 
uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBufioReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bufio.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanBufioReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = 
uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = huffmanBufioReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanStringsReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*strings.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanStringsReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = 
uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = huffmanStringsReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanGenericReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanGenericReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = 
uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. 
- if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = huffmanGenericReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -func (f *decompressor) huffmanBlockDecoder() { - switch f.r.(type) { - case *bytes.Buffer: - f.huffmanBytesBuffer() - case *bytes.Reader: - f.huffmanBytesReader() - case *bufio.Reader: - f.huffmanBufioReader() - case *strings.Reader: - f.huffmanStringsReader() - case Reader: - f.huffmanGenericReader() - default: - f.huffmanGenericReader() - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level1.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/level1.go deleted file mode 100644 index 703b9a8..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level1.go +++ /dev/null @@ -1,241 +0,0 @@ -package flate - -import ( - "encoding/binary" - "fmt" - "math/bits" -) - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. 
-type fastEncL1 struct { - fastGen - table [tableSize]tableEntry -} - -// EncodeL1 uses a similar algorithm to level 1 -func (e *fastEncL1) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashBytes = 5 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - - for { - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, tableBits, hashBytes) - candidate = e.table[nextHash] - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - - now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hashLen(now, tableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - - // Do one right away... - cv = now - s = nextS - nextS++ - candidate = e.table[nextHash] - now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur} - - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - cv = now - s = nextS - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. 
- t := candidate.offset - e.cur - var l = int32(4) - if false { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else { - // inlined: - a := src[s+4:] - b := src[t+4:] - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - l += int32(bits.TrailingZeros64(diff) >> 3) - break - } - l += 8 - a = a[8:] - b = b[8:] - } - if len(a) < 8 { - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - break - } - l++ - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - // Save the match found - if false { - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - } else { - // Inlined... - xoffset := uint32(s - t - baseMatchOffset) - xlength := l - oc := offsetCode(xoffset) - xoffset |= oc << 16 - for xlength > 0 { - xl := xlength - if xl > 258 { - if xl > 258+baseMatchLength { - xl = 258 - } else { - xl = 258 - baseMatchLength - } - } - xlength -= xl - xl -= baseMatchLength - dst.extraHist[lengthCodes1[uint8(xl)]]++ - dst.offHist[oc]++ - dst.tokens[dst.n] = token(matchType | uint32(xl)<= s { - s = nextS + 1 - } - if s >= sLimit { - // Index first pair after match end. - if int(s+l+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. 
- x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, tableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashLen(x, tableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { - cv = x >> 8 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level2.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/level2.go deleted file mode 100644 index 876dfbe..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level2.go +++ /dev/null @@ -1,214 +0,0 @@ -package flate - -import "fmt" - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastEncL2 struct { - fastGen - table [bTableSize]tableEntry -} - -// EncodeL2 uses a similar algorithm to level 1, but is capable -// of matching across blocks giving better compression at a small slowdown. -func (e *fastEncL2) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - // When should we start skipping if we haven't found matches in a long while. - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, bTableBits, hashBytes) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidate = e.table[nextHash] - now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hashLen(now, bTableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - - // Do one right away... - cv = now - s = nextS - nextS++ - candidate = e.table[nextHash] - now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur} - - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - cv = now - } - - // A 4-byte match has been found. 
We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+l+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every second hash in-between, but offset by 1. 
- for i := s - l + 2; i < s-5; i += 7 { - x := load6432(src, i) - nextHash := hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 2} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 4} - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, bTableBits, hashBytes) - prevHash2 := hashLen(x>>8, bTableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - e.table[prevHash2] = tableEntry{offset: o + 1} - currHash := hashLen(x>>16, bTableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { - cv = x >> 24 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level3.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/level3.go deleted file mode 100644 index 7aa2b72..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level3.go +++ /dev/null @@ -1,241 +0,0 @@ -package flate - -import "fmt" - -// fastEncL3 -type fastEncL3 struct { - fastGen - table [1 << 16]tableEntryPrev -} - -// Encode uses a similar algorithm to level 2, will check up to two candidates. -func (e *fastEncL3) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - tableBits = 16 - tableSize = 1 << tableBits - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - } - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - e.table[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // Skip if too small. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. 
The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 7 - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, tableBits, hashBytes) - s = nextS - nextS = s + 1 + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidates := e.table[nextHash] - now := load6432(src, nextS) - - // Safe offset distance until s + 4... - minOffset := e.cur + s - (maxMatchOffset - 4) - e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} - - // Check both candidates - candidate = candidates.Cur - if candidate.offset < minOffset { - cv = now - // Previous will also be invalid, we have nothing. - continue - } - - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { - break - } - // Both match and are valid, pick longest. - offset := s - (candidate.offset - e.cur) - o2 := s - (candidates.Prev.offset - e.cur) - l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) - if l2 > l1 { - candidate = candidates.Prev - } - break - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - } - cv = now - } - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. 
We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+8) < len(src) && t > 0 { - cv = load6432(src, t) - nextHash := hashLen(cv, tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + t}, - } - } - goto emitRemainder - } - - // Store every 5th hash in-between. - for i := s - l + 2; i < s-5; i += 6 { - nextHash := hashLen(load6432(src, i), tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + i}} - } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. 
- x := load6432(src, s-2) - prevHash := hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 2}, - } - x >>= 8 - prevHash = hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 1}, - } - x >>= 8 - currHash := hashLen(x, tableBits, hashBytes) - candidates := e.table[currHash] - cv = x - e.table[currHash] = tableEntryPrev{ - Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur}, - } - - // Check both candidates - candidate = candidates.Cur - minOffset := e.cur + s - (maxMatchOffset - 4) - - if candidate.offset > minOffset { - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Found a match... - continue - } - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Match at prev... - continue - } - } - cv = x >> 8 - s++ - break - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level4.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/level4.go deleted file mode 100644 index 23c08b3..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level4.go +++ /dev/null @@ -1,221 +0,0 @@ -package flate - -import "fmt" - -type fastEncL4 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntry -} - -func (e *fastEncL4) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.bTable[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - e.bTable[nextHashL] = entry - - t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { - // We got a long match. Use that. - break - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - lCandidate = e.bTable[hash7(next, tableBits)] - - // If the next long is a candidate, check if we should use that instead... - lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { - l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) - if l2 > l1 { - s = nextS - t = lCandidate.offset - e.cur - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. 
- l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic("s-t") - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} - e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every 3rd hash in-between - if true { - i := nextS - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - - i += 3 - for ; i < s-1; i += 3 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - e.bTable[prevHashL] = tableEntry{offset: o} - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level5.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/level5.go deleted file mode 100644 index 1f61ec1..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level5.go +++ /dev/null @@ -1,708 +0,0 @@ -package flate - -import "fmt" - -type fastEncL5 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. 
- dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - 
t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... - t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. 
- const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. 
- if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} - -// fastEncL5Window is a level 5 encoder, -// but with a custom window size. 
-type fastEncL5Window struct { - hist []byte - cur int32 - maxOffset int32 - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - maxMatchOffset := e.maxOffset - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... 
- l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... - t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. 
- const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. 
- if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} - -// Reset the encoding table. -func (e *fastEncL5Window) Reset() { - // We keep the same allocs, since we are compressing the same block sizes. - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
- if e.cur <= int32(bufferReset) { - e.cur += e.maxOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} - -func (e *fastEncL5Window) addBlock(src []byte) int32 { - // check if we have space already - maxMatchOffset := e.maxOffset - - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < int(maxMatchOffset*2) { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > e.maxOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). 
-func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > e.maxOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level6.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/level6.go deleted file mode 100644 index f1e9d98..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/level6.go +++ /dev/null @@ -1,325 +0,0 @@ -package flate - -import "fmt" - -type fastEncL6 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL6) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - // Repeat MUST be > 1 and within range - repeat := int32(1) - for { - const skipLog = 7 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - // Calculate hashes of 'next' - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Long candidate matches at least 4 bytes. - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check the previous long candidate as well. - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - // Current value did not match, but check if previous long value does. 
- t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - - // Look up next long candidate (at nextS) - lCandidate = e.bTable[nextHashL] - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check repeat at s + repOff - const repOff = 1 - t2 := s - repeat + repOff - if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 - if ml > l { - t = t2 - l = ml - s += repOff - // Not worth checking more. - break - } - } - - // If the next long is a candidate, use that... - t2 = lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - // This is ok, but check previous as well. - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. 
- if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end-of-match... - if sAt := s + l; sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 2 - eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] - // Test current - t2 := eLong.Cur.offset - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if off < maxMatchOffset { - if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - // Test next: - t2 = eLong.Prev.offset - e.cur - l + skipBeginning - off := s2 - t2 - if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if false { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - repeat = s - t - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index after match end. 
- for i := nextS + 1; i < int32(len(src))-8; i += 2 { - cv := load6432(src, i) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur - } - goto emitRemainder - } - - // Store every long hash in-between and every second short. - if true { - for i := nextS + 1; i < s-1; i += 2 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong2 := &e.bTable[hash7(cv>>8, tableBits)] - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong.Cur, eLong.Prev = t, eLong.Cur - eLong2.Cur, eLong2.Prev = t2, eLong2.Cur - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - cv = load6432(src, s) - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go deleted file mode 100644 index 4bd3885..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
- -package flate - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s deleted file mode 100644 index 9a7655c..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ /dev/null @@ -1,68 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -// Requires: BMI -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SARQ $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_generic.go 
b/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_generic.go deleted file mode 100644 index ad5cd81..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_amd64.go deleted file mode 100644 index 6ed2806..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_amd64.go +++ /dev/null @@ -1,37 +0,0 @@ -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. 
- - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 7 - reg8SizeMask16 = 15 - reg8SizeMask32 = 31 - reg8SizeMask64 = 63 - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = reg8SizeMask8 - reg16SizeMask16 = reg8SizeMask16 - reg16SizeMask32 = reg8SizeMask32 - reg16SizeMask64 = reg8SizeMask64 - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = reg8SizeMask8 - reg32SizeMask16 = reg8SizeMask16 - reg32SizeMask32 = reg8SizeMask32 - reg32SizeMask64 = reg8SizeMask64 - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = reg8SizeMask8 - reg64SizeMask16 = reg8SizeMask16 - reg64SizeMask32 = reg8SizeMask32 - reg64SizeMask64 = reg8SizeMask64 - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = reg8SizeMask8 - regSizeMaskUint16 = reg8SizeMask16 - regSizeMaskUint32 = reg8SizeMask32 - regSizeMaskUint64 = reg8SizeMask64 -) diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_other.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_other.go deleted file mode 100644 index 1b7a2cb..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !amd64 -// +build !amd64 - -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. 
- - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 0xff - reg8SizeMask16 = 0xff - reg8SizeMask32 = 0xff - reg8SizeMask64 = 0xff - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = 0xffff - reg16SizeMask16 = 0xffff - reg16SizeMask32 = 0xffff - reg16SizeMask64 = 0xffff - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = 0xffffffff - reg32SizeMask16 = 0xffffffff - reg32SizeMask32 = 0xffffffff - reg32SizeMask64 = 0xffffffff - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = 0xffffffffffffffff - reg64SizeMask16 = 0xffffffffffffffff - reg64SizeMask32 = 0xffffffffffffffff - reg64SizeMask64 = 0xffffffffffffffff - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = ^uint(0) - regSizeMaskUint16 = ^uint(0) - regSizeMaskUint32 = ^uint(0) - regSizeMaskUint64 = ^uint(0) -) diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/stateless.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/stateless.go deleted file mode 100644 index f3d4139..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/stateless.go +++ /dev/null @@ -1,318 +0,0 @@ -package flate - -import ( - "io" - "math" - "sync" -) - -const ( - maxStatelessBlock = math.MaxInt16 - // dictionary will be taken from maxStatelessBlock, so limit it. 
- maxStatelessDict = 8 << 10 - - slTableBits = 13 - slTableSize = 1 << slTableBits - slTableShift = 32 - slTableBits -) - -type statelessWriter struct { - dst io.Writer - closed bool -} - -func (s *statelessWriter) Close() error { - if s.closed { - return nil - } - s.closed = true - // Emit EOF block - return StatelessDeflate(s.dst, nil, true, nil) -} - -func (s *statelessWriter) Write(p []byte) (n int, err error) { - err = StatelessDeflate(s.dst, p, false, nil) - if err != nil { - return 0, err - } - return len(p), nil -} - -func (s *statelessWriter) Reset(w io.Writer) { - s.dst = w - s.closed = false -} - -// NewStatelessWriter will do compression but without maintaining any state -// between Write calls. -// There will be no memory kept between Write calls, -// but compression and speed will be suboptimal. -// Because of this, the size of actual Write calls will affect output size. -func NewStatelessWriter(dst io.Writer) io.WriteCloser { - return &statelessWriter{dst: dst} -} - -// bitWriterPool contains bit writers that can be reused. -var bitWriterPool = sync.Pool{ - New: func() interface{} { - return newHuffmanBitWriter(nil) - }, -} - -// StatelessDeflate allows compressing directly to a Writer without retaining state. -// When returning everything will be flushed. -// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. -// Longer dictionaries will be truncated and will still produce valid output. -// Sending nil dictionary is perfectly fine. -func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens - bw := bitWriterPool.Get().(*huffmanBitWriter) - bw.reset(out) - defer func() { - // don't keep a reference to our output - bw.reset(nil) - bitWriterPool.Put(bw) - }() - if eof && len(in) == 0 { - // Just write an EOF block. - // Could be faster... 
- bw.writeStoredHeader(0, true) - bw.flush() - return bw.err - } - - // Truncate dict - if len(dict) > maxStatelessDict { - dict = dict[len(dict)-maxStatelessDict:] - } - - // For subsequent loops, keep shallow dict reference to avoid alloc+copy. - var inDict []byte - - for len(in) > 0 { - todo := in - if len(inDict) > 0 { - if len(todo) > maxStatelessBlock-maxStatelessDict { - todo = todo[:maxStatelessBlock-maxStatelessDict] - } - } else if len(todo) > maxStatelessBlock-len(dict) { - todo = todo[:maxStatelessBlock-len(dict)] - } - inOrg := in - in = in[len(todo):] - uncompressed := todo - if len(dict) > 0 { - // combine dict and source - bufLen := len(todo) + len(dict) - combined := make([]byte, bufLen) - copy(combined, dict) - copy(combined[len(dict):], todo) - todo = combined - } - // Compress - if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) - } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) - } - isEof := eof && len(in) == 0 - - if dst.n == 0 { - bw.writeStoredHeader(len(uncompressed), isEof) - if bw.err != nil { - return bw.err - } - bw.writeBytes(uncompressed) - } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { - // If we removed less than 1/16th, huffman compress the block. - bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) - } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) - } - if len(in) > 0 { - // Retain a dict if we have more - inDict = inOrg[len(uncompressed)-maxStatelessDict:] - dict = nil - dst.Reset() - } - if bw.err != nil { - return bw.err - } - } - if !eof { - // Align, only a stored block can do that. - bw.writeStoredHeader(0, false) - } - bw.flush() - return bw.err -} - -func hashSL(u uint32) uint32 { - return (u * 0x1e35a7bd) >> slTableShift -} - -func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
- b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func statelessEnc(dst *tokens, src []byte, startAt int16) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - type tableEntry struct { - offset int16 - } - - var table [slTableSize]tableEntry - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src)-int(startAt) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = 0 - return - } - // Index until startAt - if startAt > 0 { - cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { - table[hashSL(cv)] = tableEntry{offset: i} - cv = (cv >> 8) | (uint32(src[i+4]) << 24) - } - } - - s := startAt + 1 - nextEmit := startAt - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int16(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load3216(src, s) - - for { - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashSL(cv) - candidate = table[nextHash] - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit || nextS <= 0 { - goto emitRemainder - } - - now := load6416(src, nextS) - table[nextHash] = tableEntry{offset: s} - nextHash = hashSL(uint32(now)) - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - - // Do one right away... - cv = uint32(now) - s = nextS - nextS++ - candidate = table[nextHash] - now >>= 8 - table[nextHash] = tableEntry{offset: s} - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - cv = uint32(now) - s = nextS - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - l := int16(matchLen(src[s+4:], src[t+4:]) + 4) - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - // Save the match found - dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. 
At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6416(src, s-2) - o := s - 2 - prevHash := hashSL(uint32(x)) - table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashSL(uint32(x)) - candidate = table[currHash] - table[currHash] = tableEntry{offset: o + 2} - - if uint32(x) != load3216(src, candidate.offset) { - cv = uint32(x >> 8) - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/flate/token.go b/backend/services/controller/vendor/github.com/klauspost/compress/flate/token.go deleted file mode 100644 index d818790..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/flate/token.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package flate - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits - // bits 16-22 offsetcode - 5 bits - // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits - // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits - lengthShift = 22 - offsetMask = 1<maxnumlit - offHist [32]uint16 // offset codes - litHist [256]uint16 // codes 0->255 - nFilled int - n uint16 // Must be able to contain maxStoreBlockSize - tokens [maxStoreBlockSize + 1]token -} - -func (t *tokens) Reset() { - if t.n == 0 { - return - } - t.n = 0 - t.nFilled = 0 - for i := range t.litHist[:] { - t.litHist[i] = 0 - } - for i := range t.extraHist[:] { - t.extraHist[i] = 0 - } - for i := range t.offHist[:] { - t.offHist[i] = 0 - } -} - -func (t *tokens) Fill() { - if t.n == 0 { - return - } - for i, v := range t.litHist[:] { - if v == 0 { - t.litHist[i] = 1 - t.nFilled++ - } - } - for i, v := range t.extraHist[:literalCount-256] { - if v == 0 { - t.nFilled++ - t.extraHist[i] = 1 - } - } - for i, v := range t.offHist[:offsetCodeCount] { - if v == 0 { - t.offHist[i] = 1 - } - } -} - -func indexTokens(in []token) tokens { - var t tokens - t.indexTokens(in) - return t -} - -func (t *tokens) indexTokens(in []token) { - t.Reset() - for _, tok := range in { - if tok < matchType { - t.AddLiteral(tok.literal()) - continue - } - t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) - } -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-func emitLiteral(dst *tokens, lit []byte) { - for _, v := range lit { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } -} - -func (t *tokens) AddLiteral(lit byte) { - t.tokens[t.n] = token(lit) - t.litHist[lit]++ - t.n++ -} - -// from https://stackoverflow.com/a/28730362 -func mFastLog2(val float32) float32 { - ux := int32(math.Float32bits(val)) - log2 := (float32)(((ux >> 23) & 255) - 128) - ux &= -0x7f800001 - ux += 127 << 23 - uval := math.Float32frombits(uint32(ux)) - log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 - return log2 -} - -// EstimatedBits will return an minimum size estimated by an *optimal* -// compression of the block. -// The size of the block -func (t *tokens) EstimatedBits() int { - shannon := float32(0) - bits := int(0) - nMatches := 0 - total := int(t.n) + t.nFilled - if total > 0 { - invTotal := 1.0 / float32(total) - for _, v := range t.litHist[:] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - } - } - // Just add 15 for EOB - shannon += 15 - for i, v := range t.extraHist[1 : literalCount-256] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(lengthExtraBits[i&31]) * int(v) - nMatches += int(v) - } - } - } - if nMatches > 0 { - invTotal := 1.0 / float32(nMatches) - for i, v := range t.offHist[:offsetCodeCount] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(offsetExtraBits[i&31]) * int(v) - } - } - } - return int(shannon) + bits -} - -// AddMatch adds a match to the tokens. -// This function is very sensitive to inlining and right on the border. 
-func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { - if debugDeflate { - if xlength >= maxMatchLength+baseMatchLength { - panic(fmt.Errorf("invalid length: %v", xlength)) - } - if xoffset >= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oCode := offsetCode(xoffset) - xoffset |= oCode << 16 - - t.extraHist[lengthCodes1[uint8(xlength)]]++ - t.offHist[oCode&31]++ - t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oc := offsetCode(xoffset) - xoffset |= oc << 16 - for xlength > 0 { - xl := xlength - if xl > 258 { - // We need to have at least baseMatchLength left over for next loop. - if xl > 258+baseMatchLength { - xl = 258 - } else { - xl = 258 - baseMatchLength - } - } - xlength -= xl - xl -= baseMatchLength - t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc&31]++ - t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } - -// Convert length to code. -func lengthCode(len uint8) uint8 { return lengthCodes[len] } - -// Returns the offset code corresponding to a specific offset -func offsetCode(off uint32) uint32 { - if false { - if off < uint32(len(offsetCodes)) { - return offsetCodes[off&255] - } else if off>>7 < uint32(len(offsetCodes)) { - return offsetCodes[(off>>7)&255] + 14 - } else { - return offsetCodes[(off>>14)&255] + 28 - } - } - if off < uint32(len(offsetCodes)) { - return offsetCodes[uint8(off)] - } - return offsetCodes14[uint8(off>>7)] -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/fse/README.md b/backend/services/controller/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324d..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. 
- -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. 
Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. -All compression functions are currently only running on the calling goroutine so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k speed are typically 200MB/s per core for compression and -around 300MB/s decompression speed. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. 
Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/fse/bitreader.go b/backend/services/controller/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb39..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. 
-func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. 
- b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/fse/bitwriter.go b/backend/services/controller/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index e82fa3b..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. 
-// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. 
-func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/fse/bytereader.go b/backend/services/controller/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. 
-func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/fse/compress.go b/backend/services/controller/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 65d7773..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. 
- s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - src = src[:ip] - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. 
- for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - default: - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - s.bw.close() - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m, symlen := uint32(0), s.symbolLen - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - symlen = uint16(i) + 1 - } - s.symbolLen = symlen - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. 
-func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if 
weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. -func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // 
-1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return nil -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -} - -// allocDtable will allocate decoding tables if they are not big enough. -func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. 
-func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. 
-func (s *Scratch) decompress() error { - br := &s.bits - if err := br.init(s.br.unread()); err != nil { - return err - } - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. 
-type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/fse/fse.go b/backend/services/controller/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbad..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. 
-// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. 
- Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. 
-func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/gen.sh b/backend/services/controller/vendor/github.com/klauspost/compress/gen.sh deleted file mode 100644 index aff9422..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/gen.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd s2/cmd/_s2sx/ || exit 1 -go generate . 
diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/.gitignore b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/.gitignore deleted file mode 100644 index b3d2629..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/huff0-fuzz.zip diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/README.md b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/README.md deleted file mode 100644 index 8b6e5c6..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), -a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU -(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - -This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. - -This ensures that most functionality is well tested. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. 
- -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | -| `(error)` | An internal error occurred. | - - -As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. 
- -Do however note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. - -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -This will initialize the decoding tables. -You can supply the complete block to `ReadTable` and it will return the data part of the block -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. - -For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. 
diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitreader.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitreader.go deleted file mode 100644 index e36d974..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. 
- v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 0ebc9aa..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
-func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// encFourSymbols adds up to 32 bits from four symbols. -// It will not check if there is space for them, -// so the caller must ensure that b has been flushed recently. -func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { - bitsA := encA.nBits - bitsB := bitsA + encB.nBits - bitsC := bitsB + encC.nBits - bitsD := bitsC + encD.nBits - combined := uint64(encA.val) | - (uint64(encB.val) << (bitsA & 63)) | - (uint64(encC.val) << (bitsB & 63)) | - (uint64(encD.val) << (bitsC & 63)) - b.bitContainer |= combined << (b.nBits & 63) - b.nBits += bitsD -} - -// flush32 will flush out, so there are at least 32 bits available for writing. 
-func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bytereader.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. 
-func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/compress.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 518436c..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,741 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. 
-func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. 
- return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. 
- s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src), nil -} - -func (s *Scratch) compress1xDo(dst, src []byte) []byte { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. 
- for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - bw.close() - return bw.out -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - idx := len(s.Out) - s.Out = s.compress1xDo(s.Out, toDo) - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. -func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. 
- go func(i int) { - s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else if s.prevTable[i].nBits == 0 { - reuse = false - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. 
- huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count() == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) - huffNode[lowS].setParent(nodeNb) - huffNode[lowS-1].setParent(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].setCount(1 << 30) - } - // fake entry, strong barrier - huffNode0[0].setCount(1 << 31) - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) - huffNode0[n1+1].setParent(nodeNb) - huffNode0[n2+1].setParent(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].setNbBits(0) - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits()]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - 
s.cTable[v.symbol()].nBits = v.nbBits() - } - - // assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. - var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count() { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits() - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits() > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) - huffNode[n].setNbBits(maxNbBits) - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits() == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits 
- - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits() >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits() // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count() - lowTotal := 2 * huffNode[lowPos].count() - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + - huffNode[rankLast[nBitsToDecrease]].nbBits()) - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits() == maxNbBits { - n-- - } - huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -// A nodeElt is the fields -// -// count uint32 -// parent uint16 -// symbol byte -// nbBits uint8 -// -// in some order, all squashed into an integer so that the compiler -// always loads and stores entire nodeElts instead of separate fields. 
-type nodeElt uint64 - -func makeNodeElt(count uint32, symbol byte) nodeElt { - return nodeElt(count) | nodeElt(symbol)<<48 -} - -func (e *nodeElt) count() uint32 { return uint32(*e) } -func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } -func (e *nodeElt) symbol() byte { return byte(*e >> 48) } -func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } - -func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } -func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } -func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 54bd08b..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1167 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, fmt.Errorf("fse decompress returned: %w", err) - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. 
- weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: 
uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - single[i] = d - } - *rank += length - } - - return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. 
-type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 0 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. 
-func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. 
-// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - 
br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. - const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = 
single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - // copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. 
- v := single[br.peekByteFast()].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// matches will compare a decoding table to a coding table. -// Errors are written to the writer. -// Nothing will be written if table is ok. -func (s *Scratch) matches(ct cTable, w io.Writer) { - if s == nil || len(s.dt.single) == 0 { - return - } - dt := s.dt.single[:1<>8) == byte(sym) { - fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) - errs++ - break - } - } - if errs == 0 { - broken-- - } - continue - } - // Unused bits in input - ub := tablelog - enc.nBits - top := enc.val << ub - // decoder looks at top bits. - dec := dt[top] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) - continue - } - // Ensure that all combinations are covered. 
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go deleted file mode 100644 index ba7e8e6..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// This file contains the specialisation of Decoder.Decompress4X -// and Decoder.Decompress1X that use an asm implementation of thir main loops. -package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. -// -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. -// -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. 
-const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. 
- } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. 
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index c4c7ab2..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,830 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
- -//go:build amd64 && !appengine && !noasm && gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), BX - MOVQ 48(AX), SI - MOVQ 24(AX), R8 - MOVQ 32(AX), R9 - MOVQ (AX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ (R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 24(R10) - ORQ R13, R11 - - // exhausted += (br0.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 48(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 72(R10) - ORQ R13, R11 - - // exhausted += (br1.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // 
br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 96(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 120(R10) - ORQ R13, R11 - - // exhausted += (br2.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 144(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 168(R10) - ORQ R13, R11 - - // exhausted += (br3.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - 
-skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVW AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x02, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R8 - MOVQ 32(CX), R9 - MOVQ (CX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ (R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 24(R10) - ORQ R14, R11 - - // exhausted += (br0.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB 
CL, R12 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 48(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 72(R10) - ORQ R14, R11 - - // exhausted += (br1.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW 
(R9)(R13*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 96(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 120(R10) - ORQ R14, R11 - - // exhausted += (br2.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL 
AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 144(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 168(R10) - ORQ R14, R11 - - // exhausted += (br3.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVL AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), 
$0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ 
(DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 908c17d..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,299 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This 
file contains a generic implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. 
- const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - //copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. 
-// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/huff0.go b/backend/services/controller/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index e8ad17a..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. - // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. 
- // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - br byteReader - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. 
-func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.br.init(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. 
- if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. 
- if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. -func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c51..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. 
For more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call returned function to restore previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. -func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. 
EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a4..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. 
-func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. 
-func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. 
-// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. 
Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. 
- obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. 
- compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 2aa6a95..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// EncodeBlockInto exposes encodeBlock but checks dst size. -func EncodeBlockInto(dst, src []byte) (d int) { - if MaxEncodedLen(len(src)) > len(dst) { - return 0 - } - - // encodeBlock breaks on too big blocks, so split. - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return d -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. 
- // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. 
right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. 
At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. 
-package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. 
- // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/s2sx.mod b/backend/services/controller/vendor/github.com/klauspost/compress/s2sx.mod deleted file mode 100644 index 2263853..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/s2sx.mod +++ /dev/null @@ -1,4 +0,0 @@ -module github.com/klauspost/compress - -go 1.16 - diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/s2sx.sum b/backend/services/controller/vendor/github.com/klauspost/compress/s2sx.sum deleted file mode 100644 index e69de29..0000000 diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/README.md b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index bdd49c8..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/README.md 
+++ /dev/null @@ -1,441 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. -It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. For now focused on speed. - -This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. - -This package is pure Go and without use of "unsafe". - -The `zstd` package is provided as open source software using a Go standard license. - -Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. - -For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). - -## Installation - -Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. - -[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) - -## Compressor - -### Status: - -STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. - -There may still be specific combinations of data types/size/settings that could lead to edge cases, -so as always, testing is recommended. - -For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. - -* The "Fastest" compression ratio is roughly equivalent to zstd level 1. -* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). -* The "Better" compression ratio is roughly equivalent to zstd level 7. -* The "Best" compression ratio is roughly equivalent to zstd level 11. 
- -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. -The compression ratio compared to stdlib is around level 3, but usually 3x as fast. - - -### Usage - -An Encoder can be used for either compressing a stream via the -`io.WriteCloser` interface supported by the Encoder or as multiple independent -tasks via the `EncodeAll` function. -Smaller encodes are encouraged to use the EncodeAll function. -Use `NewWriter` to create a new instance that can be used for both. - -To create a writer with default options, do like this: - -```Go -// Compress input to output. -func Compress(in io.Reader, out io.Writer) error { - enc, err := zstd.NewWriter(out) - if err != nil { - return err - } - _, err = io.Copy(enc, in) - if err != nil { - enc.Close() - return err - } - return enc.Close() -} -``` - -Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. -Even if your encode fails, you should still call `Close()` to release any resources that may be held up. - -The above is fine for big encodes. However, whenever possible try to *reuse* the writer. - -To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. -This will allow the encoder to reuse all resources and avoid wasteful allocations. - -Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part -of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change -in the future. So if you want to limit concurrency for future updates, specify the concurrency -you would like. - -If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` -which will compress input as each block is completed, blocking on writes until each has completed. - -You can specify your desired compression level using `WithEncoderLevel()` option. 
Currently only pre-defined -compression settings can be specified. - -#### Future Compatibility Guarantees - -This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. - -The goal will be to keep the default efficiency at the default zstd (level 3). -However the encoding should never be assumed to remain the same, -and you should not use hashes of compressed output for similarity checks. - -The Encoder can be assumed to produce the same output from the exact same code version. -However, the may be modes in the future that break this, -although they will not be enabled without an explicit option. - -This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. - -Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), -[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) -and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). - -#### Blocks - -For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. - -`EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently. -Each call will only run on a same goroutine as the caller. - -Encoded blocks can be concatenated and the result will be the combined input stream. -Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. - -Especially when encoding blocks you should take special care to reuse the encoder. -This will effectively make it run without allocations after a warmup period. -To make it run completely without allocations, supply a destination buffer with space for all content. 
- -```Go -import "github.com/klauspost/compress/zstd" - -// Create a writer that caches compressors. -// For this operation type we supply a nil Reader. -var encoder, _ = zstd.NewWriter(nil) - -// Compress a buffer. -// If you have a destination buffer, the allocation in the call can also be eliminated. -func Compress(src []byte) []byte { - return encoder.EncodeAll(src, make([]byte, 0, len(src))) -} -``` - -You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` -option when creating the writer. - -Using the Encoder for both a stream and individual blocks concurrently is safe. - -### Performance - -I have collected some speed examples to compare speed and compression against other compressors. - -* `file` is the input file. -* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". -* `insize`/`outsize` is the input/output size. -* `millis` is the number of milliseconds used for compression. -* `mb/s` is megabytes (2^20 bytes) per second. - -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73821326 634 318.47 -silesia.tar zskp 2 211947520 67655404 1508 133.96 -silesia.tar zskp 3 211947520 64746933 3000 67.37 -silesia.tar zskp 4 211947520 60073508 16926 11.94 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1498 134.87 -silesia.tar gzkp 1 211947520 80088272 1009 200.31 - -GOB stream of binary data. Highly compressible. 
-https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. 
-https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 -github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 -github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 -github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 - -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 - -github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 -github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 -rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 -rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 -rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 - -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 - -rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 -rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 -nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 -nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 -nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 
22.98 - -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 - -nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 -``` - -## Decompressor - -Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. - -This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). -The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, -or run it past its limits with ANY input provided. - -### Usage - -The package has been designed for two main usages, big streams of data and smaller in-memory buffers. -There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. - -For streaming use a simple setup could look like this: - -```Go -import "github.com/klauspost/compress/zstd" - -func Decompress(in io.Reader, out io.Writer) error { - d, err := zstd.NewReader(in) - if err != nil { - return err - } - defer d.Close() - - // Copy content... - _, err = io.Copy(out, d) - return err -} -``` - -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, -when running with default settings. -Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. - -Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. -However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data -as it is being requested only. 
- -For decoding buffers, it could look something like this: - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a reader that caches decompressors. -// For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) - -// Decompress a buffer. We don't supply a destination buffer, -// so it will be allocated by the decoder. -func Decompress(src []byte) ([]byte, error) { - return decoder.DecodeAll(src, nil) -} -``` - -Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. -By default 4 decompressors will be created. - -It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. -It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. - -### Dictionaries - -Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. - -Dictionaries are added individually to Decoders. -Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. -To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. -Several dictionaries can be added at once. - -The dictionary will be used automatically for the data that specifies them. -A re-used Decoder will still contain the dictionaries registered. - -When registering multiple dictionaries with the same ID, the last one will be used. - -It is possible to use dictionaries when compressing data. - -To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used -and it will likely be used even if it doesn't improve compression. - -The used dictionary must be used to decompress the content. - -For any real gains, the dictionary should be built with similar data. 
-If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. -Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. -For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). - -For now there is a fixed startup performance penalty for compressing content with dictionaries. -This will likely be improved over time. Just be aware to test performance when implementing. - -### Allocation-less operation - -The decoder has been designed to operate without allocations after a warmup. - -This means that you should *store* the decoder for best performance. -To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. -A decoder can safely be re-used even if the previous stream failed. - -To release the resources, you must call the `Close()` function on a decoder. -After this it can *no longer be reused*, but all running goroutines will be stopped. -So you *must* use this if you will no longer need the Reader. - -For decompressing smaller buffers a single decoder can be used. -When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. -In this case no unneeded allocations should be made. - -### Concurrency - -The buffer decoder does everything on the same goroutine and does nothing concurrently. -It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. - -The stream decoder will create goroutines that: - -1) Reads input and splits the input into blocks. -2) Decompression of literals. -3) Decompression of sequences. -4) Reconstruction of output stream. - -So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. - -The concurrency level will, for streams, determine how many blocks ahead the compression will start. 
- -Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. - -In practice this means that concurrency is often limited to utilizing about 3 cores effectively. - -### Benchmarks - -The first two are streaming decodes and the last are smaller inputs. - -Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. - -``` -BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op -BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op - -Concurrent blocks, performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op 
-``` - -This reflects the performance around May 2022, but this may be out of date. - -## Zstd inside ZIP files - -It is possible to use zstandard to compress individual files inside zip archives. -While this isn't widely supported it can be useful for internal files. - -To support the compression and decompression of these files you must register a compressor and decompressor. - -It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT -use the global registration functions. The main reason for this is that 2 registrations from -different packages will result in a panic. - -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitreader.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 25ca983..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. 
-func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) - b.bitsRead = 0 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - return - } - - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 1952f17..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. -func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. -// It will not check if there is space for them. 
-// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockdec.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 9f17ce6..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,726 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
-// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. - maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - err error - - // Check against this crc, if hasCRC is true. - checkCRC uint32 - hasCRC bool - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. 
- localFrame *frameDec - - sequence []seqVals - - async struct { - newHist *history - literals []byte - seqData []byte - seqSize int // Size of uncompressed sequences - fcs uint64 - } - - // Block is RLE, this is the size. - RLESize uint32 - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - } - return &b -} - -// reset will reset the block. -// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. 
- cSize := int(bh >> 3) - maxSize := maxCompressedBlockSizeAlloc - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSizeAlloc - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + compressedBlockOverAlloc - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - // Empty compressed blocks must at least be 2 bytes - // for Literals_Block_Type and one for Sequences_Section_Header. - if cSize < 2 { - return ErrBlockTooSmall - } - case blockTypeRaw: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { - // byteBuf doesn't need a destination buffer. 
- if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) - } - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err -} - -// Close will release resources. -// Closed blockDec cannot be reset. -func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxCompressedBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. 
- return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 
litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. - huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. 
- if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - if debugDecoder { - println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... 
- if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to 0x%x, code: %v", symb, v) - } - case compModeFSE: - println("Reading table for", tableIndex(i)) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = 
br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil - } - br := seqs.br - if br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - return err - } - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. 
- if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockenc.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 2cfe925..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,889 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. 
-func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. 
-func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. -type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. 
-func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. 
-func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. -func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. 
-func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 16 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - if err == nil && len(out)+5 > len(lits) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSizes(len(out), len(lits), single) - if len(out)+lh.size() >= len(lits) { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... 
- // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - // We want some difference to at least account for the headers. 
- saved := b.size - len(b.literals) - (b.size >> 6) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. - bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 16 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - if err == nil && len(out)+5 > len(b.literals) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSize(len(b.literals)) - szRaw := lh.size() - lh.setSizes(len(out), len(b.literals), single) - szComp := lh.size() - if len(out)+szComp >= len(b.literals)+szRaw { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... 
- if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. 
- chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). - nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, 
b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... - for seq >= 0 { - s = b.sequences[seq] - - ofB := ofTT[s.ofCode] - wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. 
- //of.encode(ofB) - nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 - dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) - wr.addBits16NC(of.state, uint8(nbBitsOut)) - of.state = of.stateTable[dstState] - - // Accumulate extra bits. - outBits := ofB.outBits & 31 - extraBits := uint64(s.offset & bitMask32[outBits]) - extraBitsN := outBits - - mlB := mlTT[s.mlCode] - //ml.encode(mlB) - nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 - dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) - wr.addBits16NC(ml.state, uint8(nbBitsOut)) - ml.state = ml.stateTable[dstState] - - outBits = mlB.outBits & 31 - extraBits = extraBits<> 16 - dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) - wr.addBits16NC(ll.state, uint8(nbBitsOut)) - ll.state = ll.stateTable[dstState] - - outBits = llB.outBits & 31 - extraBits = extraBits<= b.size { - // Discard and encode as raw block. - b.output = b.encodeRawTo(b.output[:bhOffset], org) - b.popOffsets() - b.litEnc.Reuse = huff0.ReusePolicyNone - return nil - } - - // Size is output minus block header. 
- bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debugEncoder { - println("Rewriting block header", bh) - } - _ = bh.appendTo(b.output[bhOffset:bhOffset]) - b.coders.setPrev(llEnc, mlEnc, ofEnc) - return nil -} - -var errIncompressible = errors.New("incompressible") - -func (b *blockEnc) genCodes() { - if len(b.sequences) == 0 { - // nothing to do - return - } - if len(b.sequences) > math.MaxUint16 { - panic("can only encode up to 64K sequences") - } - // No bounds checks after here: - llH := b.coders.llEnc.Histogram() - ofH := b.coders.ofEnc.Histogram() - mlH := b.coders.mlEnc.Histogram() - for i := range llH { - llH[i] = 0 - } - for i := range ofH { - ofH[i] = 0 - } - for i := range mlH { - mlH[i] = 0 - } - - var llMax, ofMax, mlMax uint8 - for i := range b.sequences { - seq := &b.sequences[i] - v := llCode(seq.litLen) - seq.llCode = v - llH[v]++ - if v > llMax { - llMax = v - } - - v = ofCode(seq.offset) - seq.ofCode = v - ofH[v]++ - if v > ofMax { - ofMax = v - } - - v = mlCode(seq.matchLen) - seq.mlCode = v - mlH[v]++ - if v > mlMax { - mlMax = v - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) - } - } - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) - } - if debugAsserts && ofMax > maxOffsetBits { - panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) - } - if debugAsserts && llMax > maxLiteralLengthSymbol { - panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) - } - - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) -} diff --git 
a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blocktype_string.go deleted file mode 100644 index 01a01e4..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/blocktype_string.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 55a3885..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. - readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. - skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). 
use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, io.ErrUnexpectedEOF - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. 
- if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := io.ReadFull(r.r, r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(io.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytereader.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a24..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. 
-func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index f6a2409..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. 
-// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 to 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. - // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? 
- // If true CompressedSize will be populated. - // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
-func (h *Header) Decode(in []byte) error { - *h = Header{} - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if string(b) != frameMagic { - if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch - } - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch len(b) { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if 
len(in) < fcsSize { - return io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch len(b) { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index f04aaa2..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,948 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - dstBuf []byte - } - - frame *frameDec - - // Custom dictionaries. - dicts map[uint32]*dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. -// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. 
It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which is will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.crc = xxhash.New() - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. 
-func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, d.current.err - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - d.syncStream.br.r = nil - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If bytes buffer and < 5MB, do sync decoding anyway. 
- if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.syncStream.dstBuf) > 0 { - dst = d.syncStream.dstBuf[:0] - } - - dst, err := d.DecodeAll(b, dst) - if err == nil { - err = io.EOF - } - // Save output buffer - d.syncStream.dstBuf = dst - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - // Remove current block. - d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - d.syncStream.dstBuf = nil - - // Ensure no-one else is still running... - d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. 
-func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
- block := <-d.decoders - frame := block.localFrame - initialSize := len(dst) - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if err = d.setDict(frame); err != nil { - return nil, err - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 && !d.o.limitToCap { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. 
- if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if d.o.ignoreChecksum { - return true - } - - if len(next.b) > 0 { - d.current.crc.Write(next.b) - } - if next.err == nil && next.d != nil && next.d.hasCRC { - got := uint32(d.current.crc.Sum64()) - if got != next.d.checkCRC { - if debugDecoder { - printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - printf("CRC ok %08x\n", got) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err == nil { - d.current.err = d.setDict(d.frame) - } - if d.current.err != nil { - return false - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before 
CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. -func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. 
-// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... 
- go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.reset() - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - hist.reset() - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.reset() - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxCompressedBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += 
uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - hist.reset() - }() - - var hist history -decodeStream: - for { - var hasErr bool - hist.reset() - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil { - err = d.setDict(frame) - } - if err == nil && d.frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) - } - - err = ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. 
- } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.hasCRC = false - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if len(crc) < 4 { - if err == nil { - err = io.ErrUnexpectedEOF - - } - println("CRC missing?", err) - dec.err = err - } else { - dec.checkCRC = binary.LittleEndian.Uint32(crc) - dec.hasCRC = true - if debugDecoder { - printf("found crc to check: %08x\n", dec.checkCRC) - } - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - hist.reset() - d.frame.history.b = frameHistCache -} - -func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] - if ok { - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(dict) - } else if frame.DictionaryID != 0 { - // A zero or missing dictionary id is ambiguous: - // either dictionary zero, or no dictionary. In particular, - // zstd --patch-from uses this id for the source file, - // so only return an error if the dictionary id is not zero. - err = ErrUnknownDictionary - } - return err -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 774c5f0..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math/bits" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []*dict - ignoreChecksum bool - limitToCap bool - decodeBufsBelow int -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - decodeBufsBelow: 128 << 10, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. -// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n < 0 { - return errors.New("concurrency must be at least 1") - } - if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n - } - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. 
-// This can be used to control memory usage of potentially hostile content. -// Maximum is 1 << 63 bytes. Default is 64GiB. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return errors.New("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// -// Each slice in dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// If several dictionaries with the same ID are provided, the last one will be used. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, d) - } - return nil - } -} - -// WithDecoderDictRaw registers a dictionary that may be used by the decoder. -// The slice content can be arbitrary data. -func WithDecoderDictRaw(id uint32, content []byte) DOption { - return func(o *decoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) - return nil - } -} - -// WithDecoderMaxWindow allows to set a maximum window size for decodes. -// This allows rejecting packets that will cause big memory usage. -// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. -// If WithDecoderMaxMemory is set to a lower value, that will be used. -// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. 
-func WithDecoderMaxWindow(size uint64) DOption { - return func(o *decoderOptions) error { - if size < MinWindowSize { - return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") - } - if size > (1<<41)+7*(1<<38) { - return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") - } - o.maxWindowSize = size - return nil - } -} - -// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, -// or any size set in WithDecoderMaxMemory. -// This can be used to limit decoding to a specific maximum output size. -// Disabled by default. -func WithDecodeAllCapLimit(b bool) DOption { - return func(o *decoderOptions) error { - o.limitToCap = b - return nil - } -} - -// WithDecodeBuffersBelow will fully decode readers that have a -// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. -// This typically uses less allocations but will have the full decompressed object in memory. -// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. -// Default is 128KiB. -func WithDecodeBuffersBelow(size int) DOption { - return func(o *decoderOptions) error { - o.decodeBufsBelow = size - return nil - } -} - -// IgnoreChecksum allows to forcibly ignore checksum checking. 
-func IgnoreChecksum(b bool) DOption { - return func(o *decoderOptions) error { - o.ignoreChecksum = b - return nil - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/dict.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index 8d5567f..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,534 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sort" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte -} - -const dictMagic = "\x37\xa4\x30\xec" - -// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. -const dictMaxLength = 1 << 31 - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// ContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) ContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Content returns the dictionary content. -func (d *dict) Content() []byte { - if d == nil { - return nil - } - return d.content -} - -// Offsets returns the initial offsets. -func (d *dict) Offsets() [3]int { - if d == nil { - return [3]int{} - } - return d.offsets -} - -// LitEncoder returns the literal encoder. -func (d *dict) LitEncoder() *huff0.Scratch { - if d == nil { - return nil - } - return d.litEnc -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. 
- if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if string(b[:4]) != dictMagic { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, fmt.Errorf("loading literal table: %w", err) - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. 
- dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} - -// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. -func InspectDictionary(b []byte) (interface { - ID() uint32 - ContentSize() int - Content() []byte - Offsets() [3]int - LitEncoder() *huff0.Scratch -}, error) { - initPredefined() - d, err := loadDict(b) - return d, err -} - -type BuildDictOptions struct { - // Dictionary ID. - ID uint32 - - // Content to use to create dictionary tables. - Contents [][]byte - - // History to use for all blocks. - History []byte - - // Offsets to use. - Offsets [3]int - - // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. - // See https://github.com/facebook/zstd/issues/3724 - CompatV155 bool - - // Use the specified encoder level. - // The dictionary will be built using the specified encoder level, - // which will reflect speed and make the dictionary tailored for that level. - // If not set SpeedBestCompression will be used. - Level EncoderLevel - - // DebugOut will write stats and other details here if set. 
- DebugOut io.Writer -} - -func BuildDict(o BuildDictOptions) ([]byte, error) { - initPredefined() - hist := o.History - contents := o.Contents - debug := o.DebugOut != nil - println := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintln(o.DebugOut, args...) - } - } - printf := func(s string, args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintf(o.DebugOut, s, args...) - } - } - print := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprint(o.DebugOut, args...) - } - } - - if int64(len(hist)) > dictMaxLength { - return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) - } - if len(hist) < 8 { - return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) - } - if len(contents) == 0 { - return nil, errors.New("no content provided") - } - d := dict{ - id: o.ID, - litEnc: nil, - llDec: sequenceDec{}, - ofDec: sequenceDec{}, - mlDec: sequenceDec{}, - offsets: o.Offsets, - content: hist, - } - block := blockEnc{lowMem: false} - block.init() - enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) - if o.Level != 0 { - eOpts := encoderOptions{ - level: o.Level, - blockSize: maxMatchLen, - windowSize: maxMatchLen, - dict: &d, - lowMem: false, - } - enc = eOpts.encoder() - } else { - o.Level = SpeedBestCompression - } - var ( - remain [256]int - ll [256]int - ml [256]int - of [256]int - ) - addValues := func(dst *[256]int, src []byte) { - for _, v := range src { - dst[v]++ - } - } - addHist := func(dst *[256]int, src *[256]uint32) { - for i, v := range src { - dst[i] += int(v) - } - } - seqs := 0 - nUsed := 0 - litTotal := 0 - newOffsets := make(map[uint32]int, 1000) - for _, b := range contents { - block.reset(nil) - if len(b) < 8 { - continue - } - nUsed++ - enc.Reset(&d, true) - enc.Encode(&block, b) - addValues(&remain, block.literals) - litTotal += len(block.literals) - seqs += len(block.sequences) - 
block.genCodes() - addHist(&ll, block.coders.llEnc.Histogram()) - addHist(&ml, block.coders.mlEnc.Histogram()) - addHist(&of, block.coders.ofEnc.Histogram()) - for i, seq := range block.sequences { - if i > 3 { - break - } - offset := seq.offset - if offset == 0 { - continue - } - if offset > 3 { - newOffsets[offset-3]++ - } else { - newOffsets[uint32(o.Offsets[offset-1])]++ - } - } - } - // Find most used offsets. - var sortedOffsets []uint32 - for k := range newOffsets { - sortedOffsets = append(sortedOffsets, k) - } - sort.Slice(sortedOffsets, func(i, j int) bool { - a, b := sortedOffsets[i], sortedOffsets[j] - if a == b { - // Prefer the longer offset - return sortedOffsets[i] > sortedOffsets[j] - } - return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] - }) - if len(sortedOffsets) > 3 { - if debug { - print("Offsets:") - for i, v := range sortedOffsets { - if i > 20 { - break - } - printf("[%d: %d],", v, newOffsets[v]) - } - println("") - } - - sortedOffsets = sortedOffsets[:3] - } - for i, v := range sortedOffsets { - o.Offsets[i] = int(v) - } - if debug { - println("New repeat offsets", o.Offsets) - } - - if nUsed == 0 || seqs == 0 { - return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) - } - if debug { - println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) - } - if seqs/nUsed < 512 { - // Use 512 as minimum. 
- nUsed = seqs / 512 - } - copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { - hist := dst.Histogram() - var maxSym uint8 - var maxCount int - var fakeLength int - for i, v := range src { - if v > 0 { - v = v / nUsed - if v == 0 { - v = 1 - } - } - if v > maxCount { - maxCount = v - } - if v != 0 { - maxSym = uint8(i) - } - fakeLength += v - hist[i] = uint32(v) - } - dst.HistogramFinished(maxSym, maxCount) - dst.reUsed = false - dst.useRLE = false - err := dst.normalizeCount(fakeLength) - if err != nil { - return nil, err - } - if debug { - println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) - } - return dst.writeCount(nil) - } - if debug { - print("Literal lengths: ") - } - llTable, err := copyHist(block.coders.llEnc, &ll) - if err != nil { - return nil, err - } - if debug { - print("Match lengths: ") - } - mlTable, err := copyHist(block.coders.mlEnc, &ml) - if err != nil { - return nil, err - } - if debug { - print("Offsets: ") - } - ofTable, err := copyHist(block.coders.ofEnc, &of) - if err != nil { - return nil, err - } - - // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } - huffBuff := make([]byte, 0, avgSize) - // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } - if debug { - println("Huffman weights:") - } - for i, n := range remain[:] { - if n > 0 { - n = n / div - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- if debug { - printf("[%d: %d], ", i, n) - } - } - } - if o.CompatV155 && remain[255]/div == 0 { - huffBuff = append(huffBuff, 255) - } - scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { - scratch = &huff0.Scratch{TableLog: 11} - _, _, err = huff0.Compress1X(huffBuff, scratch) - if err == nil { - break - } - if debug { - printf("Try %d: Huffman error: %v\n", tries+1, err) - } - huffBuff = huffBuff[:0] - if tries == 250 { - if debug { - println("Huffman: Bailing out with predefined table") - } - - // Bail out.... Just generate something - huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { - huffBuff = append(huffBuff, byte(i)) - } - continue - } - if errors.Is(err, huff0.ErrIncompressible) { - // Try truncating least common. - for i, n := range remain[:] { - if n > 0 { - n = n / (div * (i + 1)) - if n > 0 { - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - } - } - } - if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { - huffBuff = append(huffBuff, 255) - } - if len(huffBuff) == 0 { - huffBuff = append(huffBuff, 0, 255) - } - } - if errors.Is(err, huff0.ErrUseRLE) { - for i, n := range remain[:] { - n = n / (div * (i + 1)) - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- } - } - } - - var out bytes.Buffer - out.Write([]byte(dictMagic)) - out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) - out.Write(scratch.OutTable) - if debug { - println("huff table:", len(scratch.OutTable), "bytes") - println("of table:", len(ofTable), "bytes") - println("ml table:", len(mlTable), "bytes") - println("ll table:", len(llTable), "bytes") - } - out.Write(ofTable) - out.Write(mlTable) - out.Write(llTable) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) - out.Write(hist) - if debug { - _, err := loadDict(out.Bytes()) - if err != nil { - panic(err) - } - i, err := InspectDictionary(out.Bytes()) - if err != nil { - panic(err) - } - println("ID:", i.ID()) - println("Content size:", i.ContentSize()) - println("Encoder:", i.LitEncoder() != nil) - println("Offsets:", i.Offsets()) - var totalSize int - for _, b := range contents { - totalSize += len(b) - } - - encWith := func(opts ...EOption) int { - enc, err := NewWriter(nil, opts...) 
- if err != nil { - panic(err) - } - defer enc.Close() - var dst []byte - var totalSize int - for _, b := range contents { - dst = enc.EncodeAll(b, dst[:0]) - totalSize += len(dst) - } - return totalSize - } - plain := encWith(WithEncoderLevel(o.Level)) - withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) - println("Input size:", totalSize) - println("Plain Compressed:", plain) - println("Dict Compressed:", withDict) - println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") - } - return out.Bytes(), nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_base.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 5ca4603..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,173 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - bufferReset int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. 
- if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. -func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > e.bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. -func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. 
-func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - e.blk.dictLitEnc = nil - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.ContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < e.bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_best.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 858f8f4..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2019+ Klaus Post. 
All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = maxMatchLen * 8 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep)) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. 
-// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [bestShortTableSize]prevEntry{} - e.longTable = [bestLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. 
- bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - const goodEnough = 250 - - cv := load6432(src, s) - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - // Set m to a match at offset if it looks like that will improve compression. - improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - delta := s - offset - if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { - return - } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - // Try to quick reject if we already have a long match. 
- if m.length > 16 { - left := len(src) - int(m.s+m.length) - // If we are too close to the end, keep as is. - if left <= 0 { - return - } - checkLen := m.length - (s - m.s) - 8 - if left > 2 && checkLen > 4 { - // Check 4 bytes, 4 bytes from the end of the current match. - a := load3232(src, offset+checkLen) - b := load3232(src, s+checkLen) - if a != b { - return - } - } - } - l := 4 + e.matchlen(s+4, offset+4, src) - if rep < 0 { - // Extend candidate match backwards as far as possible. - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { - s-- - offset-- - l++ - } - } - - cand := match{offset: offset, s: s, length: l, rep: rep} - cand.estBits(bitsPerByte) - if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { - *m = cand - } - } - - best := match{s: s, est: highScore} - improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) - - if canRepeat && best.length < goodEnough { - if s == nextEmit { - // Check repeats straight after a match. - improve(&best, s-offset2, s, uint32(cv), 1|4) - improve(&best, s-offset3, s, uint32(cv), 2|4) - if offset1 > 1 { - improve(&best, s-(offset1-1), s, uint32(cv), 3|4) - } - } - - // If either no match or a non-repeat match, check at + 1 - if best.rep <= 0 { - cv32 := uint32(cv >> 8) - spp := s + 1 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - if best.rep < 0 { - cv32 = uint32(cv >> 24) - spp += 2 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - } - } - } - // Load next and check... 
- e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - continue - } - - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s+1) - cv2 := load6432(src, s+2) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) - // Long at s+1, s+2 - improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) - improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) - if false { - // Short at s+3. - // Too often worse... - improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) - } - - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - // We cannot do this if we have already indexed this position. - const skipBeginning = 2 - if best.s > s-skipBeginning { - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. 
- if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - - if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - } - } - } - } - } - - if debugAsserts { - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - if best.rep > 0 { - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s < nextEmit { - panic("s < nextEmit") - } - addLiterals(&seq, best.s) - - // Repeat. If bit 4 is set, this is a non-lit repeat. - seq.offset = uint32(best.rep & 3) - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index old s + 1 -> s - 1 - index0 := s + 1 - s = best.s + best.length - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } - // Index skipped... 
- off := index0 + e.cur - for index0 < s { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - switch best.rep { - case 2, 4 | 1: - offset1, offset2 = offset2, offset1 - case 3, 4 | 2: - offset1, offset2, offset3 = offset3, offset1, offset2 - case 4 | 3: - offset1, offset2, offset3 = offset1-1, offset1, offset2 - } - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - index0 := s + 1 - s = best.s - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Write our sequence - var seq seq - l := best.length - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index old s + 1 -> s - 1 - for index0 < s { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = 
make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_better.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index 8582f31..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1242 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. 
- betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. 
- for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [betterShortTableSize]tableEntry{} - e.longTable = [betterLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... 
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... 
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. 
- if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 3 bytes, but depends on input. 
- // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 3 - - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - s2 := s + skipBeginning - cv := load3232(src, s2) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... 
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... 
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. 
- matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. 
- s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - 
e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func 
(e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index a154c18..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1123 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastLongLen = 8 // Bytes used for table hash - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
- dFastShortLen = 5 // Bytes used for table hash - -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [dFastShortTableSize]tableEntry{} - e.longTable = [dFastLongTableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. 
- nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. 
- offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. 
We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. 
- coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. 
- s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - 
e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - //copy(e.longTable[:], e.dictLongTable) - e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - 
*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) - - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f45a3da..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,891 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. 
- for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. 
- // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && 
uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. 
- seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxCompressedBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. 
- nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. 
- l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. 
-func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [tableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. 
- // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. 
- l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 2 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - //copy(e.table[:], e.dictTable) - e.table = *(*[tableSize]tableEntry)(e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - 
*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 72af7ef..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,619 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - "math" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. 
-func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst := fh.appendTo(tmp[:0]) - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. 
- if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.err != nil { - return s.err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. 
- enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.writeErr != nil { - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. 
-func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. -// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. 
- fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst = fh.appendTo(dst) - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. 
- oldout := blk.output - // Output directly to dst - blk.output = dst - - err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = blk.output - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = append(dst, blk.output...) - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - var err error - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} - -// MaxEncodedSize returns the expected maximum -// size of an encoded block or stream. -func (e *Encoder) MaxEncodedSize(size int) int { - frameHeader := 4 + 2 // magic + frame header & window descriptor - if e.o.dict != nil { - frameHeader += 4 - } - // Frame content size: - if size < 256 { - frameHeader++ - } else if size < 65536+256 { - frameHeader += 2 - } else if size < math.MaxInt32 { - frameHeader += 4 - } else { - frameHeader += 8 - } - // Final crc - if e.o.crc { - frameHeader += 4 - } - - // Max overhead is 3 bytes/block. - // There cannot be 0 blocks. - blocks := (size + e.o.blockSize) / e.o.blockSize - - // Combine, add padding. 
- maxSz := frameHeader + 3*blocks + size - if e.o.pad > 1 { - maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) - } - return maxSz -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index faaf819..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,339 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: false, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. 
-func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. 
-func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - n = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. 
-// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. -func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. 
-func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 16 << 20 - case SpeedBestCompression: - o.windowSize = 32 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedDefault - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. 
-func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. 
-func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// -// The slice dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// The encoder *may* choose to use no dictionary instead for certain payloads. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} - -// WithEncoderDictRaw registers a dictionary that may be used by the encoder. -// -// The slice content may contain arbitrary data. It will be used as an initial -// history. -func WithEncoderDictRaw(id uint32, content []byte) EOption { - return func(o *encoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} - return nil - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/framedec.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index 53e160f..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -const ( - frameMagic = "\x28\xb5\x2f\xfd" - skippableFrameMagic = "\x2a\x4d\x18" -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - case nil: - signature[0] = b[0] - default: - return err - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - case nil: - copy(signature[1:], b) - default: - return err - } - - if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) - } - // Break if not skippable frame. 
- break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if string(signature[:]) != frameMagic { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = 0 - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch len(b) { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, 
"ID:", id) - } - d.DictionaryID = id - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch len(b) { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. - d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. 
- if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or window size below 2MB. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - if d.o.lowMem { - // Alloc with 1MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 - } else { - // Alloc with 2MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum, assuming the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - // We can overwrite upper tmp now - buf, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - want := binary.LittleEndian.Uint32(buf[:4]) - got := uint32(d.crc.Sum64()) - - if got != want { - if debugDecoder { - printf("CRC check failed: got %08x, want %08x\n", got, want) - } - return ErrCRCMismatch - } - if debugDecoder { - printf("CRC ok %08x\n", got) - } - return nil -} - -// consumeCRC skips over the checksum, assuming the frame has one. 
-func (d *frameDec) consumeCRC() error { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - } - return err -} - -// runDecoder will run the decoder for the remainder of the frame. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. - d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. - crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.o.limitToCap { - d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) - } - if d.FrameContentSize != fcsUnknown { - if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - } - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) - } - return dst, ErrDecoderSizeExceeded - } - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen) - } - if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { - println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) - err = ErrDecoderSizeExceeded - break - } - if d.o.limitToCap && len(d.history.b) > cap(dst) { - println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) - err = ErrDecoderSizeExceeded - break - } - if 
uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - d.crc.Write(dst[crcStart:]) - err = d.checkCRC() - } - } - } - d.history.b = saved - return dst, err -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/frameenc.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 2f5d5ed..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) []byte { - dst = append(dst, frameMagic...) 
- var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are nto stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. 
-func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) 
- * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. 
-func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits 
- } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> (bitCount & 31) - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - bitStream = b.Uint32() >> (bitCount & 31) - } - } - s.symbolLen = charnum - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return s.buildDtable() -} - -func (s *fseDecoder) mustReadFrom(r io.Reader) { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - // dt [maxTablesize]decSymbol // Decompression table. - // symbolLen uint16 // Length of active part of the symbol table. - // actualTableLog uint8 // Selected tablelog. - // maxBits uint8 // Maximum number of additional bits - // // used for table creation to avoid allocations. 
- // stateTable [256]uint16 - // norm [maxSymbolValue + 1]int16 - // preDefined bool - fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -// Using a composite uint64 is faster than a struct with separate members. -type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. 
-func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder til RLE mode. -func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. 
-func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index d04a829..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// -//go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. 
-func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index bcde398..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. 
- -//go:build !appengine && !noasm && gc && !noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ 
build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 332e51f..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - 
byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. 
- count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. 
- if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. 
-// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. -func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= 
(s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - 
toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. 
-func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - 
panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. 
-func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. 
- fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. 
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/hash.go 
b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/history.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index 0916485..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - // Literal decompression - huffTree *huff0.Scratch - - // Sequence decompression - decoders sequenceDecs - recentOffsets [3]int - - // History buffer... - b []byte - - // ignoreBuffer is meant to ignore a number of bytes - // when checking for matches in history - ignoreBuffer int - - windowSize int - allocFrameBuffer int // needed? - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.ignoreBuffer = 0 - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - h.decoders.freeDecoders() - h.decoders = sequenceDecs{br: h.decoders.br} - h.freeHuffDecoder() - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) freeHuffDecoder() { - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - h.huffTree = nil - } - } -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.decoders.dict = dict.content - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. 
- // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b5306..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 777290d..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. If desired, the `purego` build tag -opts into using the Go code even on those architectures. - -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. 
-You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index fc40c82..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. 
- -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. 
- copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
-func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index ddb63aa..0000000 --- 
a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,210 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. - MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). 
- MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. 
- SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index 17901e0..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,184 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. 
- ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index d4221ed..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(s *Digest, b []byte) int diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 0be16ce..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = 
round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go deleted file mode 100644 index f41932b..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
- -package zstd - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s deleted file mode 100644 index 9a7655c..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ /dev/null @@ -1,68 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -// Requires: BMI -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SARQ $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go 
b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go deleted file mode 100644 index 57b9c31..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index d7fe6d8..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. 
- llCode, mlCode, ofCode uint8 -} - -type seqVals struct { - ll, ml, mo int -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. - fse *fseDecoder - state fseState - repeat bool -} - -// init the state of the decoder with input from stream. -func (s *sequenceDec) init(br *bitReader) error { - if s.fse == nil { - return errors.New("sequence decoder not defined") - } - s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Copy from dictionary... - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // we may be in dictionary. 
- dictO := len(s.dict) - (seq.mo - (t + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) - } - end := dictO + seq.ml - if end > len(s.dict) { - n := len(s.dict) - dictO - copy(out[t:], s.dict[dictO:]) - t += n - seq.ml -= n - } else { - copy(out[t:], s.dict[dictO:end]) - t += end - dictO - continue - } - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into current block. - // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - // We must be in current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - continue - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} - -// decode sequences from the stream with the provided history. -func (s *sequenceDecs) decodeSync(hist []byte) error { - supported, err := s.decodeSyncSimple(hist) - if supported { - return err - } - - br := s.br - seqs := s.nSeqs - startSize := len(s.out) - // Grab full sizes tables, to avoid bounds checks. 
- llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - if debugDecoder { - println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") - } - for i := seqs - 1; i >= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(out) - if size-startSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if size > cap(out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - out = append(out, make([]byte, addBytes)...) - out = out[:len(out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - out = append(out, s.literals[:ll]...) - s.literals = s.literals[ll:] - - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - - if mo > len(out)+len(hist) || mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - - // we may be in dictionary. 
- dictO := len(s.dict) - (mo - (len(out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(out); v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, hist[start:]...) - ml -= v - } else { - out = append(out, hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(out) - mo - if ml <= len(out)-start { - // No overlap - out = append(out, out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. 
- nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - - // Add final literals - s.out = append(out, s.literals...) - return br.close() -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fill() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fill() - } - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. 
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go deleted file mode 100644 index 8adabd8..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ /dev/null @@ -1,394 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" - "io" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -type decodeSyncAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - litRemain int - out []byte - outPosition int - literals []byte - litPosition int - history []byte - windowSize int - ll int // set on error (not for all errors, please refer to _generate/gen.go) - ml int // set on error (not for all errors, please refer to _generate/gen.go) - mo int // set on error (not for all errors, please refer to _generate/gen.go) -} - -// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. 
-// -//go:noescape -func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// decode sequences from the stream with the provided history but without a dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - if len(s.dict) > 0 { - return false, nil - } - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { - return false, nil - } - - // FIXME: Using unsafe memory copies leads to rare, random crashes - // with fuzz testing. It is therefore disabled for now. 
- const useSafe = true - /* - useSafe := false - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { - useSafe = true - } - if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { - useSafe = true - } - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - useSafe = true - } - */ - - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeSyncAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - iteration: s.nSeqs - 1, - litRemain: len(s.literals), - out: s.out, - outPosition: len(s.out), - literals: s.literals, - windowSize: s.windowSize, - history: hist, - } - - s.seqSize = 0 - startSize := len(s.out) - - var errCode int - if cpuinfo.HasBMI2() { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) - } - } else { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) - } - } - switch errCode { - case noError: - break - - case errorMatchLenOfsMismatch: - return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) - - case errorMatchLenTooBig: - return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) - - case errorMatchOffTooBig: - return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", - ctx.mo, ctx.outPosition+len(hist)-startSize) - - case errorNotEnoughLiterals: - return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", - ctx.ll, ctx.litRemain+ctx.ll) - - case errorOverread: - return true, io.ErrUnexpectedEOF - - case 
errorNotEnoughSpace: - size := ctx.outPosition + ctx.ll + ctx.ml - if debugDecoder { - println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) - } - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - - default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - return true, err - } - - s.literals = s.literals[ctx.litPosition:] - t := ctx.outPosition - s.out = s.out[:t] - - // Add final literals - s.out = append(s.out, s.literals...) - if debugDecoder { - t += len(s.literals) - if t != len(s.out) { - panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) - } - } - - return true, nil -} - -// -------------------------------------------------------------------------------- - -type decodeAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - seqs []seqVals - litRemain int -} - -const noError = 0 - -// error reported when mo == 0 && ml > 0 -const errorMatchLenOfsMismatch = 1 - -// error reported when ml > maxMatchLen -const errorMatchLenTooBig = 2 - -// error reported when mo > available history or mo > s.windowSize -const errorMatchOffTooBig = 3 - -// error reported when the sum of literal lengths exeeceds the literal buffer size -const errorNotEnoughLiterals = 4 - -// error reported when capacity of `out` is too small -const errorNotEnoughSpace = 5 - -// error reported when bits are overread. -const errorOverread = 6 - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. 
-// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - seqs: seqs, - iteration: len(seqs) - 1, - litRemain: len(s.literals), - } - - if debugDecoder { - println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") - } - - s.seqSize = 0 - lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 - var errCode int - if cpuinfo.HasBMI2() { - if lte56bits { - errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_bmi2(s, br, &ctx) - } - } else { - if lte56bits { - errCode = 
sequenceDecs_decode_56_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_amd64(s, br, &ctx) - } - } - if errCode != 0 { - i := len(seqs) - ctx.iteration - 1 - switch errCode { - case errorMatchLenOfsMismatch: - ml := ctx.seqs[i].ml - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - - case errorMatchLenTooBig: - ml := ctx.seqs[i].ml - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - - case errorNotEnoughLiterals: - ll := ctx.seqs[i].ll - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) - case errorOverread: - return io.ErrUnexpectedEOF - } - - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) - } - - if ctx.litRemain < 0 { - return fmt.Errorf("literal count is too big: total available %d, total requested %d", - len(s.literals), len(s.literals)-ctx.litRemain) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if debugDecoder { - println("decode: ", br.remain(), "bits remain on stream. code:", errCode) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// -------------------------------------------------------------------------------- - -type executeAsmContext struct { - seqs []seqVals - seqIndex int - out []byte - history []byte - literals []byte - outPosition int - litPosition int - windowSize int -} - -// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. -// -// Returns false if a match offset is too big. -// -// Please refer to seqdec_generic.go for the reference implementation. 
-// -//go:noescape -func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool - -// Same as above, but with safe memcopies -// -//go:noescape -func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool - -// executeSimple handles cases when dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { - addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - ctx := executeAsmContext{ - seqs: seqs, - seqIndex: 0, - out: out, - history: hist, - outPosition: t, - litPosition: 0, - literals: s.literals, - windowSize: s.windowSize, - } - var ok bool - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - ok = sequenceDecs_executeSimple_safe_amd64(&ctx) - } else { - ok = sequenceDecs_executeSimple_amd64(&ctx) - } - if !ok { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", - seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) - } - s.literals = s.literals[ctx.litPosition:] - t = ctx.outPosition - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s deleted file mode 100644 index 974b997..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ /dev/null @@ -1,4175 +0,0 @@ -// Code 
generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_end - -sequenceDecs_decode_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_byte_by_byte - -sequenceDecs_decode_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA 
sequenceDecs_decode_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_2_end - -sequenceDecs_decode_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte - -sequenceDecs_decode_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - 
MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_amd64_adjust_offset_nonzero - -sequenceDecs_decode_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_amd64_adjust_zero - JEQ sequenceDecs_decode_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_amd64_adjust_three - JMP sequenceDecs_decode_amd64_adjust_two - -sequenceDecs_decode_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_amd64_adjust_temp_valid: - 
CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_56_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to 
have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_56_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_56_amd64_fill_end - -sequenceDecs_decode_56_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_56_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte - -sequenceDecs_decode_56_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_56_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ll_update_zero: 
- MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_56_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero - -sequenceDecs_decode_56_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_amd64_adjust_zero - JEQ 
sequenceDecs_decode_56_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_amd64_adjust_three - JMP sequenceDecs_decode_56_amd64_adjust_two - -sequenceDecs_decode_56_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_56_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_56_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_56_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_56_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_56_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long 
error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_end - -sequenceDecs_decode_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_byte_by_byte - -sequenceDecs_decode_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL 
sequenceDecs_decode_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_2_end - -sequenceDecs_decode_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte - -sequenceDecs_decode_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ 
R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_bmi2_adjust_zero - JEQ sequenceDecs_decode_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_bmi2_adjust_three - JMP sequenceDecs_decode_bmi2_adjust_two - -sequenceDecs_decode_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ 
R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_56_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_56_bmi2_fill_end - -sequenceDecs_decode_56_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_56_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte - -sequenceDecs_decode_56_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_56_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI 
- ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_56_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_bmi2_adjust_zero - JEQ sequenceDecs_decode_56_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_bmi2_adjust_three - JMP sequenceDecs_decode_56_bmi2_adjust_two - -sequenceDecs_decode_56_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_56_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_56_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), 
R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_56_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_56_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (SI)(R14*1), X0 - MOVUPS X0, 
(BX)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, R11 - JB copy_1 - ADDQ R11, SI - ADDQ R11, BX - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - 
ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ BX, R12 - ADDQ R13, BX - -copy_2: - MOVUPS (R11), X0 - MOVUPS X0, (R12) - ADDQ $0x10, R11 - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * 
seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - MOVQ R11, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (SI), X0 - MOVUPS X0, (BX) - ADDQ $0x10, SI - ADDQ $0x10, BX - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(SI)(R14*1), SI - LEAQ 16(BX)(R14*1), BX - MOVUPS -16(SI), X0 - MOVUPS X0, -16(BX) - JMP copy_1_end - -copy_1_small: - CMPQ R11, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ R11, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (SI), R14 - MOVB -1(SI)(R11*1), R15 - MOVB R14, (BX) - MOVB R15, -1(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_3: - MOVW (SI), R14 - MOVB 2(SI), R15 - MOVW R14, (BX) - MOVB R15, 2(BX) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_4through7: - MOVL (SI), R14 - MOVL -4(SI)(R11*1), R15 - MOVL R14, (BX) - MOVL R15, -4(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (SI), R14 - MOVQ -8(SI)(R11*1), R15 - MOVQ R14, (BX) - MOVQ R15, -8(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - -copy_1_end: - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, 
$0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (R11), 
X0 - MOVUPS X0, (BX) - ADDQ $0x10, R11 - ADDQ $0x10, BX - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(R11)(R12*1), R11 - LEAQ 16(BX)(R12*1), BX - MOVUPS -16(R11), X0 - MOVUPS X0, -16(BX) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (R11), R12 - MOVB -1(R11)(R13*1), R14 - MOVB R12, (BX) - MOVB R14, -1(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_3: - MOVW (R11), R12 - MOVB 2(R11), R14 - MOVW R12, (BX) - MOVB R14, 2(BX) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_4through7: - MOVL (R11), R12 - MOVL -4(R11)(R13*1), R14 - MOVL R12, (BX) - MOVL R14, -4(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (R11), R12 - MOVQ -8(R11)(R13*1), R14 - MOVQ R12, (BX) - MOVQ R14, -8(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), 
AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_end - -sequenceDecs_decodeSync_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA 
sequenceDecs_decodeSync_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_2_end - -sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - 
SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ 
s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R11)(R14*1), X0 - MOVUPS X0, (R10)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, AX - JB copy_1 - ADDQ AX, R11 - ADDQ AX, R10 - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ 
R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R10, CX - ADDQ R13, R10 - -copy_2: - MOVUPS (AX), X0 - MOVUPS X0, (CX) - ADDQ $0x10, AX - ADDQ $0x10, CX - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - 
SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - 
-sequenceDecs_decodeSync_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_end - -sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_2_end - -sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_2_end: - // 
Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 
- CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R10)(R14*1), X0 - MOVUPS X0, (R9)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, CX - JB copy_1 - ADDQ CX, R10 - ADDQ CX, R9 - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) 
- JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R9, R12 - ADDQ R13, R9 - -copy_2: - MOVUPS (CX), X0 - 
MOVUPS X0, (R12) - ADDQ $0x10, CX - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ 
AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_safe_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_end - -sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ 
sequenceDecs_decodeSync_safe_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end - -sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - 
ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_safe_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - 
-sequenceDecs_decodeSync_safe_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - MOVQ AX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R11), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R11 - ADDQ $0x10, R10 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R11)(R14*1), R11 - LEAQ 16(R10)(R14*1), R10 - MOVUPS -16(R11), X0 - MOVUPS X0, -16(R10) - JMP copy_1_end - -copy_1_small: - CMPQ AX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ AX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R11), R14 - MOVB -1(R11)(AX*1), R15 - MOVB R14, (R10) - MOVB R15, -1(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_3: - MOVW (R11), R14 - MOVB 2(R11), R15 - MOVW R14, (R10) - MOVB R15, 2(R10) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R11), R14 - MOVL -4(R11)(AX*1), R15 - MOVL R14, (R10) - MOVL R15, -4(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R11), R14 - MOVQ -8(R11)(AX*1), R15 - MOVQ R14, (R10) - MOVQ R15, -8(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - -copy_1_end: - ADDQ AX, 
R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 
2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_2_small - -copy_2_loop: - MOVUPS (AX), X0 - MOVUPS X0, (R10) - ADDQ $0x10, AX - ADDQ $0x10, R10 - SUBQ $0x10, CX - JAE copy_2_loop - LEAQ 16(AX)(CX*1), AX - LEAQ 16(R10)(CX*1), R10 - MOVUPS -16(AX), X0 - MOVUPS X0, -16(R10) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (AX), CL - MOVB -1(AX)(R13*1), R14 - MOVB CL, (R10) - MOVB R14, -1(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_3: - MOVW (AX), CX - MOVB 2(AX), R14 - MOVW CX, (R10) - MOVB R14, 2(R10) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (AX), CX - MOVL -4(AX)(R13*1), R14 - MOVL CX, (R10) - MOVL R14, -4(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (AX), CX - MOVQ -8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 
32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // 
Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - 
JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, 
R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB 
-1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ 
$0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ 
R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go 
b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index 2fb35b7..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. 
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. 
- nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. 
- // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqenc.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. 
-func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/snappy.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index ec13594..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
-// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. - ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. 
-// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - var n int - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", 
chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - 
case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/zip.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. 
-// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. 
-// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/zstd.go b/backend/services/controller/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 4be7cc7..0000000 --- a/backend/services/controller/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,121 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. - ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. 
- ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. 
- ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) - } -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/.gitignore b/backend/services/controller/vendor/github.com/montanaflynn/stats/.gitignore deleted file mode 100644 index 96b1128..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -coverage.out -.directory \ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/.travis.yml b/backend/services/controller/vendor/github.com/montanaflynn/stats/.travis.yml deleted file mode 100644 index 697dcb7..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - tip -before_install: - - sudo pip install codecov -script: - - go test -after_success: - - codecov -notifications: - email: - recipients: - - montana@montanaflynn.me - on_success: change - on_failure: always diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/backend/services/controller/vendor/github.com/montanaflynn/stats/CHANGELOG.md deleted file mode 100644 index 
532f6ed..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/CHANGELOG.md +++ /dev/null @@ -1,64 +0,0 @@ -# Change Log - -## [0.2.0](https://github.com/montanaflynn/stats/tree/0.2.0) - -### Merged pull requests: - -- Fixed typographical error, changed accomdate to accommodate in README. [\#5](https://github.com/montanaflynn/stats/pull/5) ([saromanov](https://github.com/orthographic-pedant)) - -### Package changes: - -- Add `Correlation` function -- Add `Covariance` function -- Add `StandardDeviation` function to be the same as `StandardDeviationPopulation` -- Change `Variance` function to be the same as `PopulationVariation` -- Add helper methods to `Float64Data` -- Add `Float64Data` type to use instead of `[]float64` -- Add `Series` type which references to `[]Coordinate` - -## [0.1.0](https://github.com/montanaflynn/stats/tree/0.1.0) - -Several functions were renamed in this release. They will still function but may be deprecated in the future. - -### Package changes: - -- Rename `VarP` to `PopulationVariance` -- Rename `VarS` to `SampleVariance` -- Rename `LinReg` to `LinearRegression` -- Rename `ExpReg` to `ExponentialRegression` -- Rename `LogReg` to `LogarithmicRegression` -- Rename `StdDevP` to `StandardDeviationPopulation` -- Rename `StdDevS` to `StandardDeviationSample` - -## [0.0.9](https://github.com/montanaflynn/stats/tree/0.0.9) - -### Closed issues: - -- Functions have unexpected side effects [\#3](https://github.com/montanaflynn/stats/issues/3) -- Percentile is not calculated correctly [\#2](https://github.com/montanaflynn/stats/issues/2) - -### Merged pull requests: - -- Sample [\#4](https://github.com/montanaflynn/stats/pull/4) ([saromanov](https://github.com/saromanov)) - -### Package changes: - -- Add HarmonicMean func -- Add GeometricMean func -- Add Outliers stuct and QuantileOutliers func -- Add Interquartile Range, Midhinge and Trimean examples -- Add Trimean -- Add Midhinge -- Add Inter Quartile Range -- Add 
Quantiles struct and Quantile func -- Add Nearest Rank method of calculating percentiles -- Add errors for all functions -- Add sample -- Add Linear, Exponential and Logarithmic Regression -- Add sample and population variance and deviation -- Add Percentile and Float64ToInt -- Add Round -- Add Standard deviation -- Add Sum -- Add Min and Ma- x -- Add Mean, Median and Mode diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/LICENSE b/backend/services/controller/vendor/github.com/montanaflynn/stats/LICENSE deleted file mode 100644 index 6648181..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2015 Montana Flynn (https://anonfunction.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/Makefile b/backend/services/controller/vendor/github.com/montanaflynn/stats/Makefile deleted file mode 100644 index 87844f4..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -.PHONY: all - -doc: - godoc `pwd` - -webdoc: - godoc -http=:44444 - -format: - go fmt - -test: - go test -race - -check: format test - -benchmark: - go test -bench=. -benchmem - -coverage: - go test -coverprofile=coverage.out - go tool cover -html="coverage.out" - -lint: format - go get github.com/alecthomas/gometalinter - gometalinter --install - gometalinter - -default: lint test diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/README.md b/backend/services/controller/vendor/github.com/montanaflynn/stats/README.md deleted file mode 100644 index 5f8a929..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/README.md +++ /dev/null @@ -1,103 +0,0 @@ -# Stats [![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][godoc-svg]][godoc-url] [![][license-svg]][license-url] - -A statistics package with many functions missing from the Golang standard library. See the [CHANGELOG.md](https://github.com/montanaflynn/stats/blob/master/CHANGELOG.md) for API changes and tagged releases you can vendor into your projects. - -> Statistics are used much like a drunk uses a lamppost: for support, not illumination. **- Vin Scully** - -## Installation - -``` -go get github.com/montanaflynn/stats -``` - -**Protip:** `go get -u github.com/montanaflynn/stats` updates stats to the latest version. 
- -## Usage - -The [entire API documentation](http://godoc.org/github.com/montanaflynn/stats) is available on GoDoc.org - -You can view docs offline with the following commands: - -``` -godoc ./ -godoc ./ Median -godoc ./ Float64Data -``` - -**Protip:** Generate HTML docs with `godoc -http=:4444` - -## Example - -All the functions can be seen in [examples/main.go](https://github.com/montanaflynn/stats/blob/master/examples/main.go) but here's a little taste: - -```go -// start with the some source data to use -var data = []float64{1, 2, 3, 4, 4, 5} - -median, _ := stats.Median(data) -fmt.Println(median) // 3.5 - -roundedMedian, _ := stats.Round(median, 0) -fmt.Println(roundedMedian) // 4 -``` - -**Protip:** You can [call methods](https://github.com/montanaflynn/stats/blob/master/examples/methods.go) on the data if using the Float64Data type: - -``` -var d stats.Float64Data = data - -max, _ := d.Max() -fmt.Println(max) // 5 -``` - -## Contributing - -If you have any suggestions, criticism or bug reports please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition simply starring the repo would show your support for the project and be very much appreciated! - -### Pull Requests - -Pull request are always welcome no matter how big or small. Here's an easy way to do it: - -1. Fork it and clone your fork -2. Create new branch (`git checkout -b some-thing`) -3. Make the desired changes -4. Ensure tests pass (`go test -cover` or `make test`) -5. Commit changes (`git commit -am 'Did something'`) -6. Push branch (`git push origin some-thing`) -7. Submit pull request - -To make things as seamless as possible please also consider the following steps: - -- Update `README.md` to include new public types or functions in the documentation section. -- Update `examples/main.go` with a simple example of the new feature. -- Keep 100% code coverage (you can check with `make coverage`). 
-- Run [`gometalinter`](https://github.com/alecthomas/gometalinter) and make your code pass. -- Squash needless commits into single units of work with `git rebase -i new-feature`. - -#### Makefile - -I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more. - -**Protip:** `watch -n 1 make check` will continuously format and test your code. - -## MIT License - -Copyright (c) 2014-2015 Montana Flynn - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORpublicS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -[travis-url]: https://travis-ci.org/montanaflynn/stats -[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg - -[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master -[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg - -[godoc-url]: https://godoc.org/github.com/montanaflynn/stats -[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg - -[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE -[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/correlation.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/correlation.go deleted file mode 100644 index d759bf8..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/correlation.go +++ /dev/null @@ -1,33 +0,0 @@ -package stats - -import "math" - -// Correlation describes the degree of relationship between two sets of data -func Correlation(data1, data2 Float64Data) (float64, error) { - - l1 := data1.Len() - l2 := data2.Len() - - if l1 == 0 || l2 == 0 { - return math.NaN(), EmptyInput - } - - if l1 != l2 { - return math.NaN(), SizeErr - } - - sdev1, _ := StandardDeviationPopulation(data1) - sdev2, _ := StandardDeviationPopulation(data2) - - if sdev1 == 0 || sdev2 == 0 { - return 0, nil - } - - covp, _ := CovariancePopulation(data1, data2) - return covp / (sdev1 * sdev2), nil -} - -// Pearson calculates the Pearson product-moment correlation coefficient between two variables. 
-func Pearson(data1, data2 Float64Data) (float64, error) { - return Correlation(data1, data2) -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/data.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/data.go deleted file mode 100644 index a087f45..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/data.go +++ /dev/null @@ -1,140 +0,0 @@ -package stats - -// Float64Data is a named type for []float64 with helper methods -type Float64Data []float64 - -// Get item in slice -func (f Float64Data) Get(i int) float64 { return f[i] } - -// Len returns length of slice -func (f Float64Data) Len() int { return len(f) } - -// Less returns if one number is less than another -func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] } - -// Swap switches out two numbers in slice -func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// Min returns the minimum number in the data -func (f Float64Data) Min() (float64, error) { return Min(f) } - -// Max returns the maximum number in the data -func (f Float64Data) Max() (float64, error) { return Max(f) } - -// Sum returns the total of all the numbers in the data -func (f Float64Data) Sum() (float64, error) { return Sum(f) } - -// Mean returns the mean of the data -func (f Float64Data) Mean() (float64, error) { return Mean(f) } - -// Median returns the median of the data -func (f Float64Data) Median() (float64, error) { return Median(f) } - -// Mode returns the mode of the data -func (f Float64Data) Mode() ([]float64, error) { return Mode(f) } - -// GeometricMean returns the median of the data -func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) } - -// HarmonicMean returns the mode of the data -func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) } - -// MedianAbsoluteDeviation the median of the absolute deviations from the dataset median -func (f Float64Data) MedianAbsoluteDeviation() 
(float64, error) { - return MedianAbsoluteDeviation(f) -} - -// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median -func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) { - return MedianAbsoluteDeviationPopulation(f) -} - -// StandardDeviation the amount of variation in the dataset -func (f Float64Data) StandardDeviation() (float64, error) { - return StandardDeviation(f) -} - -// StandardDeviationPopulation finds the amount of variation from the population -func (f Float64Data) StandardDeviationPopulation() (float64, error) { - return StandardDeviationPopulation(f) -} - -// StandardDeviationSample finds the amount of variation from a sample -func (f Float64Data) StandardDeviationSample() (float64, error) { - return StandardDeviationSample(f) -} - -// QuartileOutliers finds the mild and extreme outliers -func (f Float64Data) QuartileOutliers() (Outliers, error) { - return QuartileOutliers(f) -} - -// Percentile finds the relative standing in a slice of floats -func (f Float64Data) Percentile(p float64) (float64, error) { - return Percentile(f, p) -} - -// PercentileNearestRank finds the relative standing using the Nearest Rank method -func (f Float64Data) PercentileNearestRank(p float64) (float64, error) { - return PercentileNearestRank(f, p) -} - -// Correlation describes the degree of relationship between two sets of data -func (f Float64Data) Correlation(d Float64Data) (float64, error) { - return Correlation(f, d) -} - -// Pearson calculates the Pearson product-moment correlation coefficient between two variables. 
-func (f Float64Data) Pearson(d Float64Data) (float64, error) { - return Pearson(f, d) -} - -// Quartile returns the three quartile points from a slice of data -func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) { - return Quartile(d) -} - -// InterQuartileRange finds the range between Q1 and Q3 -func (f Float64Data) InterQuartileRange() (float64, error) { - return InterQuartileRange(f) -} - -// Midhinge finds the average of the first and third quartiles -func (f Float64Data) Midhinge(d Float64Data) (float64, error) { - return Midhinge(d) -} - -// Trimean finds the average of the median and the midhinge -func (f Float64Data) Trimean(d Float64Data) (float64, error) { - return Trimean(d) -} - -// Sample returns sample from input with replacement or without -func (f Float64Data) Sample(n int, r bool) ([]float64, error) { - return Sample(f, n, r) -} - -// Variance the amount of variation in the dataset -func (f Float64Data) Variance() (float64, error) { - return Variance(f) -} - -// PopulationVariance finds the amount of variance within a population -func (f Float64Data) PopulationVariance() (float64, error) { - return PopulationVariance(f) -} - -// SampleVariance finds the amount of variance within a sample -func (f Float64Data) SampleVariance() (float64, error) { - return SampleVariance(f) -} - -// Covariance is a measure of how much two sets of data change -func (f Float64Data) Covariance(d Float64Data) (float64, error) { - return Covariance(f, d) -} - -// CovariancePopulation computes covariance for entire population between two variables. 
-func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) { - return CovariancePopulation(f, d) -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/data_set_distances.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/data_set_distances.go deleted file mode 100644 index 2e549c8..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/data_set_distances.go +++ /dev/null @@ -1,94 +0,0 @@ -package stats - -import ( - "math" -) - -// Validate data for distance calculation -func validateData(dataPointX, dataPointY []float64) error { - if len(dataPointX) == 0 || len(dataPointY) == 0 { - return EmptyInput - } - - if len(dataPointX) != len(dataPointY) { - return SizeErr - } - return nil -} - -// Computes Chebyshev distance between two data sets -func ChebyshevDistance(dataPointX, dataPointY []float64) (distance float64, err error) { - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - var tempDistance float64 - for i := 0; i < len(dataPointY); i++ { - tempDistance = math.Abs(dataPointX[i] - dataPointY[i]) - if distance < tempDistance { - distance = tempDistance - } - } - return distance, nil -} - -// -// Computes Euclidean distance between two data sets -// -func EuclideanDistance(dataPointX, dataPointY []float64) (distance float64, err error) { - - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - distance = 0 - for i := 0; i < len(dataPointX); i++ { - distance = distance + ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i])) - } - return math.Sqrt(distance), nil -} - -// -// Computes Manhattan distance between two data sets -// -func ManhattanDistance(dataPointX, dataPointY []float64) (distance float64, err error) { - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - distance = 0 - for i := 0; i < len(dataPointX); i++ { - distance = distance + 
math.Abs(dataPointX[i]-dataPointY[i]) - } - return distance, nil -} - -// -// Computes minkowski distance between two data sets. -// -// Input: -// dataPointX: First set of data points -// dataPointY: Second set of data points. Length of both data -// sets must be equal. -// lambda: aka p or city blocks; With lambda = 1 -// returned distance is manhattan distance and -// lambda = 2; it is euclidean distance. Lambda -// reaching to infinite - distance would be chebysev -// distance. -// Output: -// Distance or error -// -func MinkowskiDistance(dataPointX, dataPointY []float64, lambda float64) (distance float64, err error) { - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - for i := 0; i < len(dataPointY); i++ { - distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda) - } - distance = math.Pow(distance, float64(1/lambda)) - if math.IsInf(distance, 1) == true { - return math.NaN(), InfValue - } - return distance, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/deviation.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/deviation.go deleted file mode 100644 index 539c02b..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/deviation.go +++ /dev/null @@ -1,57 +0,0 @@ -package stats - -import "math" - -// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median -func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) { - return MedianAbsoluteDeviationPopulation(input) -} - -// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median -func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) { - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - i := copyslice(input) - m, _ := Median(i) - - for key, value := range i { - i[key] = math.Abs(value - m) - } - - return Median(i) -} - -// 
StandardDeviation the amount of variation in the dataset -func StandardDeviation(input Float64Data) (sdev float64, err error) { - return StandardDeviationPopulation(input) -} - -// StandardDeviationPopulation finds the amount of variation from the population -func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - // Get the population variance - vp, _ := PopulationVariance(input) - - // Return the population standard deviation - return math.Pow(vp, 0.5), nil -} - -// StandardDeviationSample finds the amount of variation from a sample -func StandardDeviationSample(input Float64Data) (sdev float64, err error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - // Get the sample variance - vs, _ := SampleVariance(input) - - // Return the sample standard deviation - return math.Pow(vs, 0.5), nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/errors.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/errors.go deleted file mode 100644 index 0bb32f0..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package stats - -type statsErr struct { - err string -} - -func (s statsErr) Error() string { - return s.err -} - -// These are the package-wide error values. -// All error identification should use these values. 
-var ( - EmptyInput = statsErr{"Input must not be empty."} - SampleSize = statsErr{"Samples number must be less than input length."} - NaNErr = statsErr{"Not a number"} - NegativeErr = statsErr{"Slice must not contain negative values."} - ZeroErr = statsErr{"Slice must not contain zero values."} - BoundsErr = statsErr{"Input is outside of range."} - SizeErr = statsErr{"Slices must be the same length."} - InfValue = statsErr{"Value is infinite."} -) diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/legacy.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/legacy.go deleted file mode 100644 index 17557ab..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/legacy.go +++ /dev/null @@ -1,36 +0,0 @@ -package stats - -// VarP is a shortcut to PopulationVariance -func VarP(input Float64Data) (sdev float64, err error) { - return PopulationVariance(input) -} - -// VarS is a shortcut to SampleVariance -func VarS(input Float64Data) (sdev float64, err error) { - return SampleVariance(input) -} - -// StdDevP is a shortcut to StandardDeviationPopulation -func StdDevP(input Float64Data) (sdev float64, err error) { - return StandardDeviationPopulation(input) -} - -// StdDevS is a shortcut to StandardDeviationSample -func StdDevS(input Float64Data) (sdev float64, err error) { - return StandardDeviationSample(input) -} - -// LinReg is a shortcut to LinearRegression -func LinReg(s []Coordinate) (regressions []Coordinate, err error) { - return LinearRegression(s) -} - -// ExpReg is a shortcut to ExponentialRegression -func ExpReg(s []Coordinate) (regressions []Coordinate, err error) { - return ExponentialRegression(s) -} - -// LogReg is a shortcut to LogarithmicRegression -func LogReg(s []Coordinate) (regressions []Coordinate, err error) { - return LogarithmicRegression(s) -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/load.go 
b/backend/services/controller/vendor/github.com/montanaflynn/stats/load.go deleted file mode 100644 index 1012d0b..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/load.go +++ /dev/null @@ -1,184 +0,0 @@ -package stats - -import ( - "strconv" - "time" -) - -// LoadRawData parses and converts a slice of mixed data types to floats -func LoadRawData(raw interface{}) (f Float64Data) { - var r []interface{} - var s Float64Data - - switch t := raw.(type) { - case []interface{}: - r = t - case []uint: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint8: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint16: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint32: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint64: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []bool: - for _, v := range t { - if v == true { - s = append(s, 1.0) - } else { - s = append(s, 0.0) - } - } - return s - case []float64: - return Float64Data(t) - case []int: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int8: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int16: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int32: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int64: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []string: - for _, v := range t { - r = append(r, v) - } - case []time.Duration: - for _, v := range t { - r = append(r, v) - } - case map[int]int: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]int8: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]int16: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]int32: - for i := 0; i < len(t); i++ { 
- s = append(s, float64(t[i])) - } - return s - case map[int]int64: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]string: - for i := 0; i < len(t); i++ { - r = append(r, t[i]) - } - case map[int]uint: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint8: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint16: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint32: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint64: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]bool: - for i := 0; i < len(t); i++ { - if t[i] == true { - s = append(s, 1.0) - } else { - s = append(s, 0.0) - } - } - return s - case map[int]float64: - for i := 0; i < len(t); i++ { - s = append(s, t[i]) - } - return s - case map[int]time.Duration: - for i := 0; i < len(t); i++ { - r = append(r, t[i]) - } - } - - for _, v := range r { - switch t := v.(type) { - case int: - a := float64(t) - f = append(f, a) - case uint: - f = append(f, float64(t)) - case float64: - f = append(f, t) - case string: - fl, err := strconv.ParseFloat(t, 64) - if err == nil { - f = append(f, fl) - } - case bool: - if t == true { - f = append(f, 1.0) - } else { - f = append(f, 0.0) - } - case time.Duration: - f = append(f, float64(t)) - } - } - return f -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/max.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/max.go deleted file mode 100644 index d0fdd42..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/max.go +++ /dev/null @@ -1,24 +0,0 @@ -package stats - -import "math" - -// Max finds the highest number in a slice -func Max(input Float64Data) (max float64, err error) { - - // Return an error if there are no numbers - if 
input.Len() == 0 { - return math.NaN(), EmptyInput - } - - // Get the first value as the starting point - max = input.Get(0) - - // Loop and replace higher values - for i := 1; i < input.Len(); i++ { - if input.Get(i) > max { - max = input.Get(i) - } - } - - return max, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/mean.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/mean.go deleted file mode 100644 index 944bb65..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/mean.go +++ /dev/null @@ -1,60 +0,0 @@ -package stats - -import "math" - -// Mean gets the average of a slice of numbers -func Mean(input Float64Data) (float64, error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - sum, _ := input.Sum() - - return sum / float64(input.Len()), nil -} - -// GeometricMean gets the geometric mean for a slice of numbers -func GeometricMean(input Float64Data) (float64, error) { - - l := input.Len() - if l == 0 { - return math.NaN(), EmptyInput - } - - // Get the product of all the numbers - var p float64 - for _, n := range input { - if p == 0 { - p = n - } else { - p *= n - } - } - - // Calculate the geometric mean - return math.Pow(p, 1/float64(l)), nil -} - -// HarmonicMean gets the harmonic mean for a slice of numbers -func HarmonicMean(input Float64Data) (float64, error) { - - l := input.Len() - if l == 0 { - return math.NaN(), EmptyInput - } - - // Get the sum of all the numbers reciprocals and return an - // error for values that cannot be included in harmonic mean - var p float64 - for _, n := range input { - if n < 0 { - return math.NaN(), NegativeErr - } else if n == 0 { - return math.NaN(), ZeroErr - } - p += (1 / n) - } - - return float64(l) / p, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/median.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/median.go deleted file mode 100644 index 
b13d839..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/median.go +++ /dev/null @@ -1,25 +0,0 @@ -package stats - -import "math" - -// Median gets the median number in a slice of numbers -func Median(input Float64Data) (median float64, err error) { - - // Start by sorting a copy of the slice - c := sortedCopy(input) - - // No math is needed if there are no numbers - // For even numbers we add the two middle numbers - // and divide by two using the mean function above - // For odd numbers we just use the middle number - l := len(c) - if l == 0 { - return math.NaN(), EmptyInput - } else if l%2 == 0 { - median, _ = Mean(c[l/2-1 : l/2+1]) - } else { - median = float64(c[l/2]) - } - - return median, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/min.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/min.go deleted file mode 100644 index 4383852..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/min.go +++ /dev/null @@ -1,26 +0,0 @@ -package stats - -import "math" - -// Min finds the lowest number in a set of data -func Min(input Float64Data) (min float64, err error) { - - // Get the count of numbers in the slice - l := input.Len() - - // Return an error if there are no numbers - if l == 0 { - return math.NaN(), EmptyInput - } - - // Get the first value as the starting point - min = input.Get(0) - - // Iterate until done checking for a lower value - for i := 1; i < l; i++ { - if input.Get(i) < min { - min = input.Get(i) - } - } - return min, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/mode.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/mode.go deleted file mode 100644 index 1160faf..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/mode.go +++ /dev/null @@ -1,47 +0,0 @@ -package stats - -// Mode gets the mode [most frequent value(s)] of a slice of float64s -func 
Mode(input Float64Data) (mode []float64, err error) { - // Return the input if there's only one number - l := input.Len() - if l == 1 { - return input, nil - } else if l == 0 { - return nil, EmptyInput - } - - c := sortedCopyDif(input) - // Traverse sorted array, - // tracking the longest repeating sequence - mode = make([]float64, 5) - cnt, maxCnt := 1, 1 - for i := 1; i < l; i++ { - switch { - case c[i] == c[i-1]: - cnt++ - case cnt == maxCnt && maxCnt != 1: - mode = append(mode, c[i-1]) - cnt = 1 - case cnt > maxCnt: - mode = append(mode[:0], c[i-1]) - maxCnt, cnt = cnt, 1 - default: - cnt = 1 - } - } - switch { - case cnt == maxCnt: - mode = append(mode, c[l-1]) - case cnt > maxCnt: - mode = append(mode[:0], c[l-1]) - maxCnt = cnt - } - - // Since length must be greater than 1, - // check for slices of distinct values - if maxCnt == 1 { - return Float64Data{}, nil - } - - return mode, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/outlier.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/outlier.go deleted file mode 100644 index e969180..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/outlier.go +++ /dev/null @@ -1,44 +0,0 @@ -package stats - -// Outliers holds mild and extreme outliers found in data -type Outliers struct { - Mild Float64Data - Extreme Float64Data -} - -// QuartileOutliers finds the mild and extreme outliers -func QuartileOutliers(input Float64Data) (Outliers, error) { - if input.Len() == 0 { - return Outliers{}, EmptyInput - } - - // Start by sorting a copy of the slice - copy := sortedCopy(input) - - // Calculate the quartiles and interquartile range - qs, _ := Quartile(copy) - iqr, _ := InterQuartileRange(copy) - - // Calculate the lower and upper inner and outer fences - lif := qs.Q1 - (1.5 * iqr) - uif := qs.Q3 + (1.5 * iqr) - lof := qs.Q1 - (3 * iqr) - uof := qs.Q3 + (3 * iqr) - - // Find the data points that are outside of the - // inner and upper 
fences and add them to mild - // and extreme outlier slices - var mild Float64Data - var extreme Float64Data - for _, v := range copy { - - if v < lof || v > uof { - extreme = append(extreme, v) - } else if v < lif || v > uif { - mild = append(mild, v) - } - } - - // Wrap them into our struct - return Outliers{mild, extreme}, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/percentile.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/percentile.go deleted file mode 100644 index baf24d8..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/percentile.go +++ /dev/null @@ -1,80 +0,0 @@ -package stats - -import "math" - -// Percentile finds the relative standing in a slice of floats -func Percentile(input Float64Data, percent float64) (percentile float64, err error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - if percent <= 0 || percent > 100 { - return math.NaN(), BoundsErr - } - - // Start by sorting a copy of the slice - c := sortedCopy(input) - - // Multiply percent by length of input - index := (percent / 100) * float64(len(c)) - - // Check if the index is a whole number - if index == float64(int64(index)) { - - // Convert float to int - i := int(index) - - // Find the value at the index - percentile = c[i-1] - - } else if index > 1 { - - // Convert float to int via truncation - i := int(index) - - // Find the average of the index and following values - percentile, _ = Mean(Float64Data{c[i-1], c[i]}) - - } else { - return math.NaN(), BoundsErr - } - - return percentile, nil - -} - -// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method -func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) { - - // Find the length of items in the slice - il := input.Len() - - // Return an error for empty slices - if il == 0 { - return math.NaN(), EmptyInput - } - - // Return error for less 
than 0 or greater than 100 percentages - if percent < 0 || percent > 100 { - return math.NaN(), BoundsErr - } - - // Start by sorting a copy of the slice - c := sortedCopy(input) - - // Return the last item - if percent == 100.0 { - return c[il-1], nil - } - - // Find ordinal ranking - or := int(math.Ceil(float64(il) * percent / 100)) - - // Return the item that is in the place of the ordinal rank - if or == 0 { - return c[0], nil - } - return c[or-1], nil - -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/quartile.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/quartile.go deleted file mode 100644 index 29bb3a3..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/quartile.go +++ /dev/null @@ -1,74 +0,0 @@ -package stats - -import "math" - -// Quartiles holds the three quartile points -type Quartiles struct { - Q1 float64 - Q2 float64 - Q3 float64 -} - -// Quartile returns the three quartile points from a slice of data -func Quartile(input Float64Data) (Quartiles, error) { - - il := input.Len() - if il == 0 { - return Quartiles{}, EmptyInput - } - - // Start by sorting a copy of the slice - copy := sortedCopy(input) - - // Find the cutoff places depeding on if - // the input slice length is even or odd - var c1 int - var c2 int - if il%2 == 0 { - c1 = il / 2 - c2 = il / 2 - } else { - c1 = (il - 1) / 2 - c2 = c1 + 1 - } - - // Find the Medians with the cutoff points - Q1, _ := Median(copy[:c1]) - Q2, _ := Median(copy) - Q3, _ := Median(copy[c2:]) - - return Quartiles{Q1, Q2, Q3}, nil - -} - -// InterQuartileRange finds the range between Q1 and Q3 -func InterQuartileRange(input Float64Data) (float64, error) { - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - qs, _ := Quartile(input) - iqr := qs.Q3 - qs.Q1 - return iqr, nil -} - -// Midhinge finds the average of the first and third quartiles -func Midhinge(input Float64Data) (float64, error) { - if input.Len() == 0 { - return 
math.NaN(), EmptyInput - } - qs, _ := Quartile(input) - mh := (qs.Q1 + qs.Q3) / 2 - return mh, nil -} - -// Trimean finds the average of the median and the midhinge -func Trimean(input Float64Data) (float64, error) { - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - c := sortedCopy(input) - q, _ := Quartile(c) - - return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/regression.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/regression.go deleted file mode 100644 index a37a740..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/regression.go +++ /dev/null @@ -1,113 +0,0 @@ -package stats - -import "math" - -// Series is a container for a series of data -type Series []Coordinate - -// Coordinate holds the data in a series -type Coordinate struct { - X, Y float64 -} - -// LinearRegression finds the least squares linear regression on data series -func LinearRegression(s Series) (regressions Series, err error) { - - if len(s) == 0 { - return nil, EmptyInput - } - - // Placeholder for the math to be done - var sum [5]float64 - - // Loop over data keeping index in place - i := 0 - for ; i < len(s); i++ { - sum[0] += s[i].X - sum[1] += s[i].Y - sum[2] += s[i].X * s[i].X - sum[3] += s[i].X * s[i].Y - sum[4] += s[i].Y * s[i].Y - } - - // Find gradient and intercept - f := float64(i) - gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0]) - intercept := (sum[1] / f) - (gradient * sum[0] / f) - - // Create the new regression series - for j := 0; j < len(s); j++ { - regressions = append(regressions, Coordinate{ - X: s[j].X, - Y: s[j].X*gradient + intercept, - }) - } - - return regressions, nil - -} - -// ExponentialRegression returns an exponential regression on data series -func ExponentialRegression(s Series) (regressions Series, err error) { - - if len(s) == 0 { - return nil, EmptyInput - } - - var sum [6]float64 - - for i := 0; 
i < len(s); i++ { - sum[0] += s[i].X - sum[1] += s[i].Y - sum[2] += s[i].X * s[i].X * s[i].Y - sum[3] += s[i].Y * math.Log(s[i].Y) - sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y) - sum[5] += s[i].X * s[i].Y - } - - denominator := (sum[1]*sum[2] - sum[5]*sum[5]) - a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator) - b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator - - for j := 0; j < len(s); j++ { - regressions = append(regressions, Coordinate{ - X: s[j].X, - Y: a * math.Exp(b*s[j].X), - }) - } - - return regressions, nil - -} - -// LogarithmicRegression returns an logarithmic regression on data series -func LogarithmicRegression(s Series) (regressions Series, err error) { - - if len(s) == 0 { - return nil, EmptyInput - } - - var sum [4]float64 - - i := 0 - for ; i < len(s); i++ { - sum[0] += math.Log(s[i].X) - sum[1] += s[i].Y * math.Log(s[i].X) - sum[2] += s[i].Y - sum[3] += math.Pow(math.Log(s[i].X), 2) - } - - f := float64(i) - a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0]) - b := (sum[2] - a*sum[0]) / f - - for j := 0; j < len(s); j++ { - regressions = append(regressions, Coordinate{ - X: s[j].X, - Y: b + a*math.Log(s[j].X), - }) - } - - return regressions, nil - -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/round.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/round.go deleted file mode 100644 index b66779c..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/round.go +++ /dev/null @@ -1,38 +0,0 @@ -package stats - -import "math" - -// Round a float to a specific decimal place or precision -func Round(input float64, places int) (rounded float64, err error) { - - // If the float is not a number - if math.IsNaN(input) { - return math.NaN(), NaNErr - } - - // Find out the actual sign and correct the input for later - sign := 1.0 - if input < 0 { - sign = -1 - input *= -1 - } - - // Use the places arg to get the amount of precision wanted - precision := 
math.Pow(10, float64(places)) - - // Find the decimal place we are looking to round - digit := input * precision - - // Get the actual decimal number as a fraction to be compared - _, decimal := math.Modf(digit) - - // If the decimal is less than .5 we round down otherwise up - if decimal >= 0.5 { - rounded = math.Ceil(digit) - } else { - rounded = math.Floor(digit) - } - - // Finally we do the math to actually create a rounded number - return rounded / precision * sign, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/sample.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/sample.go deleted file mode 100644 index a52f6dc..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/sample.go +++ /dev/null @@ -1,44 +0,0 @@ -package stats - -import "math/rand" - -// Sample returns sample from input with replacement or without -func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) { - - if input.Len() == 0 { - return nil, EmptyInput - } - - length := input.Len() - if replacement { - - result := Float64Data{} - rand.Seed(unixnano()) - - // In every step, randomly take the num for - for i := 0; i < takenum; i++ { - idx := rand.Intn(length) - result = append(result, input[idx]) - } - - return result, nil - - } else if !replacement && takenum <= length { - - rand.Seed(unixnano()) - - // Get permutation of number of indexies - perm := rand.Perm(length) - result := Float64Data{} - - // Get element of input by permutated index - for _, idx := range perm[0:takenum] { - result = append(result, input[idx]) - } - - return result, nil - - } - - return nil, BoundsErr -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/sum.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/sum.go deleted file mode 100644 index 53485f1..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/sum.go +++ /dev/null @@ -1,18 +0,0 @@ 
-package stats - -import "math" - -// Sum adds all the numbers of a slice together -func Sum(input Float64Data) (sum float64, err error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - // Add em up - for _, n := range input { - sum += n - } - - return sum, nil -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/util.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/util.go deleted file mode 100644 index 8819976..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/util.go +++ /dev/null @@ -1,43 +0,0 @@ -package stats - -import ( - "sort" - "time" -) - -// float64ToInt rounds a float64 to an int -func float64ToInt(input float64) (output int) { - r, _ := Round(input, 0) - return int(r) -} - -// unixnano returns nanoseconds from UTC epoch -func unixnano() int64 { - return time.Now().UTC().UnixNano() -} - -// copyslice copies a slice of float64s -func copyslice(input Float64Data) Float64Data { - s := make(Float64Data, input.Len()) - copy(s, input) - return s -} - -// sortedCopy returns a sorted copy of float64s -func sortedCopy(input Float64Data) (copy Float64Data) { - copy = copyslice(input) - sort.Float64s(copy) - return -} - -// sortedCopyDif returns a sorted copy of float64s -// only if the original data isn't sorted. -// Only use this if returned slice won't be manipulated! 
-func sortedCopyDif(input Float64Data) (copy Float64Data) { - if sort.Float64sAreSorted(input) { - return input - } - copy = copyslice(input) - sort.Float64s(copy) - return -} diff --git a/backend/services/controller/vendor/github.com/montanaflynn/stats/variance.go b/backend/services/controller/vendor/github.com/montanaflynn/stats/variance.go deleted file mode 100644 index 66e60c9..0000000 --- a/backend/services/controller/vendor/github.com/montanaflynn/stats/variance.go +++ /dev/null @@ -1,105 +0,0 @@ -package stats - -import "math" - -// _variance finds the variance for both population and sample data -func _variance(input Float64Data, sample int) (variance float64, err error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInput - } - - // Sum the square of the mean subtracted from each number - m, _ := Mean(input) - - for _, n := range input { - variance += (float64(n) - m) * (float64(n) - m) - } - - // When getting the mean of the squared differences - // "sample" will allow us to know if it's a sample - // or population and wether to subtract by one or not - return variance / float64((input.Len() - (1 * sample))), nil -} - -// Variance the amount of variation in the dataset -func Variance(input Float64Data) (sdev float64, err error) { - return PopulationVariance(input) -} - -// PopulationVariance finds the amount of variance within a population -func PopulationVariance(input Float64Data) (pvar float64, err error) { - - v, err := _variance(input, 0) - if err != nil { - return math.NaN(), err - } - - return v, nil -} - -// SampleVariance finds the amount of variance within a sample -func SampleVariance(input Float64Data) (svar float64, err error) { - - v, err := _variance(input, 1) - if err != nil { - return math.NaN(), err - } - - return v, nil -} - -// Covariance is a measure of how much two sets of data change -func Covariance(data1, data2 Float64Data) (float64, error) { - - l1 := data1.Len() - l2 := data2.Len() - - if l1 == 0 || l2 == 0 { - return 
math.NaN(), EmptyInput - } - - if l1 != l2 { - return math.NaN(), SizeErr - } - - m1, _ := Mean(data1) - m2, _ := Mean(data2) - - // Calculate sum of squares - var ss float64 - for i := 0; i < l1; i++ { - delta1 := (data1.Get(i) - m1) - delta2 := (data2.Get(i) - m2) - ss += (delta1*delta2 - ss) / float64(i+1) - } - - return ss * float64(l1) / float64(l1-1), nil -} - -// CovariancePopulation computes covariance for entire population between two variables. -func CovariancePopulation(data1, data2 Float64Data) (float64, error) { - - l1 := data1.Len() - l2 := data2.Len() - - if l1 == 0 || l2 == 0 { - return math.NaN(), EmptyInput - } - - if l1 != l2 { - return math.NaN(), SizeErr - } - - m1, _ := Mean(data1) - m2, _ := Mean(data2) - - var s float64 - for i := 0; i < l1; i++ { - delta1 := (data1.Get(i) - m1) - delta2 := (data2.Get(i) - m2) - s += delta1 * delta2 - } - - return s / float64(l1), nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/.gitignore b/backend/services/controller/vendor/github.com/nats-io/nats.go/.gitignore deleted file mode 100644 index ae4871f..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/.gitignore +++ /dev/null @@ -1,45 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# Emacs -*~ -\#*\# -.\#* - -# vi/vim -.??*.swp - -# Mac -.DS_Store - -# Eclipse -.project -.settings/ - -# bin - -# Goland -.idea - -# VS Code -.vscode \ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/.golangci.yaml b/backend/services/controller/vendor/github.com/nats-io/nats.go/.golangci.yaml deleted file mode 100644 index fb548e5..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/.golangci.yaml +++ /dev/null 
@@ -1,16 +0,0 @@ -issues: - max-issues-per-linter: 0 - max-same-issues: 0 - exclude-rules: - - linters: - - errcheck - text: "Unsubscribe" - - linters: - - errcheck - text: "Drain" - - linters: - - errcheck - text: "msg.Ack" - - linters: - - errcheck - text: "watcher.Stop" diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/.travis.yml b/backend/services/controller/vendor/github.com/nats-io/nats.go/.travis.yml deleted file mode 100644 index 1505f77..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go -go: -- "1.21.x" -- "1.20.x" -go_import_path: github.com/nats-io/nats.go -install: -- go get -t ./... -- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then - go install github.com/mattn/goveralls@latest; - go install github.com/wadey/gocovmerge@latest; - go install honnef.co/go/tools/cmd/staticcheck@latest; - go install github.com/client9/misspell/cmd/misspell@latest; - fi -before_script: -- $(exit $(go fmt ./... | wc -l)) -- go vet -modfile=go_test.mod ./... -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then - find . -type f -name "*.go" | xargs misspell -error -locale US; - GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./...; - fi -- golangci-lint run ./jetstream/... -script: -- go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then ./scripts/cov.sh TRAVIS; else go test -modfile=go_test.mod -race -v -p=1 ./... 
--failfast -vet=off -tags=internal_testing; fi -after_success: -- if [[ "$TRAVIS_GO_VERSION" =~ 1.21 ]]; then $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci; fi - -jobs: - include: - - name: "Go: 1.21.x (nats-server@main)" - go: "1.21.x" - before_script: - - go get -modfile go_test.mod github.com/nats-io/nats-server/v2@main - allow_failures: - - name: "Go: 1.21.x (nats-server@main)" diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/.words b/backend/services/controller/vendor/github.com/nats-io/nats.go/.words deleted file mode 100644 index 24be7f6..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/.words +++ /dev/null @@ -1,106 +0,0 @@ -1 - -derek -dlc -ivan - -acknowledgement/SM -arity -deduplication/S -demarshal/SDG -durables -iff -observable/S -redelivery/S -retransmitting -retry/SB - -SlowConsumer - -AppendInt -ReadMIMEHeader - -clientProtoZero -jetstream -v1 -v2 - -ack/SGD -auth -authToken -chans -creds -config/S -cseq -impl -msgh -msgId -mux/S -nack -ptr -puback -scanf -stderr -stdout -structs -tm -todo -unsub/S - -permessage -permessage-deflate -urlA -urlB -websocket -ws -wss - -NKey -pList - -backend/S -backoff/S -decompressor/CGS -inflight -inlined -lookups -reconnection/MS -redeliver/ADGS -responder/S -rewrap/S -rollup/S -unreceive/DRSZGB -variadic -wakeup/S -whitespace -wrap/AS - -omitempty - -apache -html -ietf -www - -sum256 -32bit/S -64bit/S -64k -128k -512k - -hacky -handroll/D - -rfc6455 -rfc7692 -0x00 -0xff -20x -40x -50x - -ErrXXX - -atlanta -eu diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/.words.readme b/backend/services/controller/vendor/github.com/nats-io/nats.go/.words.readme deleted file mode 100644 index 9d9f5cb..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/.words.readme +++ /dev/null @@ -1,25 +0,0 @@ -The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries -but populates the dictionary 
with identifiers from the Go source. - - - -Alas, no comments are allowed in the .words file and newer versions of gospel -error out on seeing them. This is really a hunspell restriction. - -We assume en_US hunspell dictionaries are installed and used. -The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff) -Invoke `hunspell -D` to see the actual locations. - -Words which are in the base dictionary can't have extra affix rules added to -them, so we have to start with the affixed variant we want to add. -Thus `creds` rather than `cred/S` and so on. - -So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants, -we have to use unreceive as the stem. - -We can't define our own affix or compound rules, -to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2} - -The spelling tokenizer doesn't take "permessage-deflate" as allowing for ... -"permessage-deflate", which is an RFC7692 registered extension for websockets. -We have to explicitly list "permessage". diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md deleted file mode 100644 index b850d49..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -## Community Code of Conduct - -NATS follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/CONTRIBUTING.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/CONTRIBUTING.md deleted file mode 100644 index 9eea61b..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/CONTRIBUTING.md +++ /dev/null @@ -1,45 +0,0 @@ -# Contributing - -Thanks for your interest in contributing! This document contains `nats-io/nats.go` specific contributing details. 
If you are a first-time contributor, please refer to the general [NATS Contributor Guide](https://nats.io/contributing/) to get a comprehensive overview of contributing to the NATS project. - -## Getting started - -There are three general ways you can contribute to this repo: - -- Proposing an enhancement or new feature -- Reporting a bug or regression -- Contributing changes to the source code - -For the first two, refer to the [GitHub Issues](https://github.com/nats-io/nats.go/issues/new/choose) which guides you through the available options along with the needed information to collect. - -## Contributing changes - -_Prior to opening a pull request, it is recommended to open an issue first to ensure the maintainers can review intended changes. Exceptions to this rule include fixing non-functional source such as code comments, documentation or other supporting files._ - -Proposing source code changes is done through GitHub's standard pull request workflow. - -If your branch is a work-in-progress then please start by creating your pull requests as draft, by clicking the down-arrow next to the `Create pull request` button and instead selecting `Create draft pull request`. - -This will defer the automatic process of requesting a review from the NATS team and significantly reduces noise until you are ready. Once you are happy, you can click the `Ready for review` button. - -### Guidelines - -A good pull request includes: - -- A high-level description of the changes, including links to any issues that are related by adding comments like `Resolves #NNN` to your description. See [Linking a Pull Request to an Issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) for more information. -- An up-to-date parent commit. Please make sure you are pulling in the latest `main` branch and rebasing your work on top of it, i.e. `git rebase main`. -- Unit tests where appropriate. 
Bug fixes will benefit from the addition of regression tests. New features will not be accepted without suitable test coverage! -- No more commits than necessary. Sometimes having multiple commits is useful for telling a story or isolating changes from one another, but please squash down any unnecessary commits that may just be for clean-up, comments or small changes. -- No additional external dependencies that aren't absolutely essential. Please do everything you can to avoid pulling in additional libraries/dependencies into `go.mod` as we will be very critical of these. - -### Sign-off - -In order to accept a contribution, you will first need to certify that the contribution is your original work and that you license the work to the project under the [Apache-2.0 license](https://github.com/nats-io/nats.go/blob/main/LICENSE). - -This is done by using `Signed-off-by` statements, which should appear in **both** your commit messages and your PR description. Please note that we can only accept sign-offs under a legal name. Nicknames and aliases are not permitted. - -To perform a sign-off with `git`, use `git commit -s` (or `--signoff`). - -## Get help - -If you have questions about the contribution process, please start a [GitHub discussion](https://github.com/nats-io/nats.go/discussions), join the [NATS Slack](https://slack.nats.io/), or send your question to the [NATS Google Group](https://groups.google.com/forum/#!forum/natsio). diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/GOVERNANCE.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/GOVERNANCE.md deleted file mode 100644 index 1d5a7be..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/GOVERNANCE.md +++ /dev/null @@ -1,3 +0,0 @@ -# NATS Go Client Governance - -NATS Go Client (go-nats) is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). 
\ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/LICENSE b/backend/services/controller/vendor/github.com/nats-io/nats.go/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/MAINTAINERS.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/MAINTAINERS.md deleted file mode 100644 index 2321465..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/MAINTAINERS.md +++ /dev/null @@ -1,8 +0,0 @@ -# Maintainers - -Maintainership is on a per project basis. - -### Maintainers - - Derek Collison [@derekcollison](https://github.com/derekcollison) - - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) - - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/README.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/README.md deleted file mode 100644 index bc8cceb..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/README.md +++ /dev/null @@ -1,482 +0,0 @@ -# NATS - Go Client -A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). 
- -[![License Apache 2][License-Image]][License-Url] [![Go Report Card][ReportCard-Image]][ReportCard-Url] [![Build Status][Build-Status-Image]][Build-Status-Url] [![GoDoc][GoDoc-Image]][GoDoc-Url] [![Coverage Status][Coverage-image]][Coverage-Url] - -[License-Url]: https://www.apache.org/licenses/LICENSE-2.0 -[License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg -[ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go -[ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go -[Build-Status-Url]: https://travis-ci.com/github/nats-io/nats.go -[Build-Status-Image]: https://travis-ci.com/nats-io/nats.go.svg?branch=main -[GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go -[GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c -[Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main -[Coverage-image]: https://coveralls.io/repos/github/nats-io/nats.go/badge.svg?branch=main - -**Check out [NATS by example](https://natsbyexample.com) - An evolving collection of runnable, cross-client reference examples for NATS.** - -## Installation - -```bash -# Go client -go get github.com/nats-io/nats.go/ - -# Server -go get github.com/nats-io/nats-server -``` - -When using or transitioning to Go modules support: - -```bash -# Go client latest or explicit version -go get github.com/nats-io/nats.go/@latest -go get github.com/nats-io/nats.go/@v1.33.1 - -# For latest NATS Server, add /v2 at the end -go get github.com/nats-io/nats-server/v2 - -# NATS Server v1 is installed otherwise -# go get github.com/nats-io/nats-server -``` - -## Basic Usage - -```go -import "github.com/nats-io/nats.go" - -// Connect to a server -nc, _ := nats.Connect(nats.DefaultURL) - -// Simple Publisher -nc.Publish("foo", []byte("Hello World")) - -// Simple Async Subscriber -nc.Subscribe("foo", func(m *nats.Msg) { - fmt.Printf("Received a message: %s\n", string(m.Data)) -}) - -// Responding to a request message 
-nc.Subscribe("request", func(m *nats.Msg) { - m.Respond([]byte("answer is 42")) -}) - -// Simple Sync Subscriber -sub, err := nc.SubscribeSync("foo") -m, err := sub.NextMsg(timeout) - -// Channel Subscriber -ch := make(chan *nats.Msg, 64) -sub, err := nc.ChanSubscribe("foo", ch) -msg := <- ch - -// Unsubscribe -sub.Unsubscribe() - -// Drain -sub.Drain() - -// Requests -msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond) - -// Replies -nc.Subscribe("help", func(m *nats.Msg) { - nc.Publish(m.Reply, []byte("I can help!")) -}) - -// Drain connection (Preferred for responders) -// Close() not needed if this is called. -nc.Drain() - -// Close connection -nc.Close() -``` - -## JetStream - -JetStream is the built-in NATS persistence system. `nats.go` provides a built-in -API enabling both managing JetStream assets as well as publishing/consuming -persistent messages. - -### Basic usage - -```go -// connect to nats server -nc, _ := nats.Connect(nats.DefaultURL) - -// create jetstream context from nats connection -js, _ := jetstream.New(nc) - -ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) -defer cancel() - -// get existing stream handle -stream, _ := js.Stream(ctx, "foo") - -// retrieve consumer handle from a stream -cons, _ := stream.Consumer(ctx, "cons") - -// consume messages from the consumer in callback -cc, _ := cons.Consume(func(msg jetstream.Msg) { - fmt.Println("Received jetstream message: ", string(msg.Data())) - msg.Ack() -}) -defer cc.Stop() -``` - -To find more information on `nats.go` JetStream API, visit -[`jetstream/README.md`](jetstream/README.md) - -> The current JetStream API replaces the [legacy JetStream API](legacy_jetstream.md) - -## Service API - -The service API (`micro`) allows you to [easily build NATS services](micro/README.md) The -services API is currently in beta release. 
- -## Encoded Connections - -```go - -nc, _ := nats.Connect(nats.DefaultURL) -c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) -defer c.Close() - -// Simple Publisher -c.Publish("foo", "Hello World") - -// Simple Async Subscriber -c.Subscribe("foo", func(s string) { - fmt.Printf("Received a message: %s\n", s) -}) - -// EncodedConn can Publish any raw Go type using the registered Encoder -type person struct { - Name string - Address string - Age int -} - -// Go type Subscriber -c.Subscribe("hello", func(p *person) { - fmt.Printf("Received a person: %+v\n", p) -}) - -me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"} - -// Go type Publisher -c.Publish("hello", me) - -// Unsubscribe -sub, err := c.Subscribe("foo", nil) -// ... -sub.Unsubscribe() - -// Requests -var response string -err = c.Request("help", "help me", &response, 10*time.Millisecond) -if err != nil { - fmt.Printf("Request failed: %v\n", err) -} - -// Replying -c.Subscribe("help", func(subj, reply string, msg string) { - c.Publish(reply, "I can help!") -}) - -// Close connection -c.Close(); -``` - -## New Authentication (Nkeys and User Credentials) -This requires server with version >= 2.0.0 - -NATS servers have a new security and authentication mechanism to authenticate with user credentials and Nkeys. -The simplest form is to use the helper method UserCredentials(credsFilepath). -```go -nc, err := nats.Connect(url, nats.UserCredentials("user.creds")) -``` - -The helper methods creates two callback handlers to present the user JWT and sign the nonce challenge from the server. -The core client library never has direct access to your private key and simply performs the callback for signing the server challenge. -The helper will load and wipe and erase memory it uses for each connect or reconnect. - -The helper also can take two entries, one for the JWT and one for the NKey seed file. 
-```go -nc, err := nats.Connect(url, nats.UserCredentials("user.jwt", "user.nk")) -``` - -You can also set the callback handlers directly and manage challenge signing directly. -```go -nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB)) -``` - -Bare Nkeys are also supported. The nkey seed should be in a read only file, e.g. seed.txt -```bash -> cat seed.txt -# This is my seed nkey! -SUAGMJH5XLGZKQQWAWKRZJIGMOU4HPFUYLXJMXOO5NLFEO2OOQJ5LPRDPM -``` - -This is a helper function which will load and decode and do the proper signing for the server nonce. -It will clear memory in between invocations. -You can choose to use the low level option and provide the public key and a signature callback on your own. - -```go -opt, err := nats.NkeyOptionFromSeed("seed.txt") -nc, err := nats.Connect(serverUrl, opt) - -// Direct -nc, err := nats.Connect(serverUrl, nats.Nkey(pubNkey, sigCB)) -``` - -## TLS - -```go -// tls as a scheme will enable secure connections by default. This will also verify the server name. -nc, err := nats.Connect("tls://nats.demo.io:4443") - -// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup. -// We provide a helper method to make this case easier. 
-nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem")) - -// If the server requires client certificate, there is an helper function for that too: -cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem") -nc, err = nats.Connect("tls://localhost:4443", cert) - -// You can also supply a complete tls.Config - -certFile := "./configs/certs/client-cert.pem" -keyFile := "./configs/certs/client-key.pem" -cert, err := tls.LoadX509KeyPair(certFile, keyFile) -if err != nil { - t.Fatalf("error parsing X509 certificate/key pair: %v", err) -} - -config := &tls.Config{ - ServerName: opts.Host, - Certificates: []tls.Certificate{cert}, - RootCAs: pool, - MinVersion: tls.VersionTLS12, -} - -nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config)) -if err != nil { - t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) -} - -``` - -## Using Go Channels (netchan) - -```go -nc, _ := nats.Connect(nats.DefaultURL) -ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) -defer ec.Close() - -type person struct { - Name string - Address string - Age int -} - -recvCh := make(chan *person) -ec.BindRecvChan("hello", recvCh) - -sendCh := make(chan *person) -ec.BindSendChan("hello", sendCh) - -me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"} - -// Send via Go channels -sendCh <- me - -// Receive via Go channels -who := <- recvCh -``` - -## Wildcard Subscriptions - -```go - -// "*" matches any token, at any level of the subject. -nc.Subscribe("foo.*.baz", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -nc.Subscribe("foo.bar.*", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -// ">" matches any length of the tail of a subject, and can only be the last token -// E.g. 
'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22' -nc.Subscribe("foo.>", func(m *Msg) { - fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); -}) - -// Matches all of the above -nc.Publish("foo.bar.baz", []byte("Hello World")) - -``` - -## Queue Groups - -```go -// All subscriptions with the same queue name will form a queue group. -// Each message will be delivered to only one subscriber per queue group, -// using queuing semantics. You can have as many queue groups as you wish. -// Normal subscribers will continue to work as expected. - -nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) { - received += 1; -}) -``` - -## Advanced Usage - -```go - -// Normally, the library will return an error when trying to connect and -// there is no server running. The RetryOnFailedConnect option will set -// the connection in reconnecting state if it failed to connect right away. -nc, err := nats.Connect(nats.DefaultURL, - nats.RetryOnFailedConnect(true), - nats.MaxReconnects(10), - nats.ReconnectWait(time.Second), - nats.ReconnectHandler(func(_ *nats.Conn) { - // Note that this will be invoked for the first asynchronous connect. - })) -if err != nil { - // Should not return an error even if it can't connect, but you still - // need to check in case there are some configuration errors. -} - -// Flush connection to server, returns when all messages have been processed. -nc.Flush() -fmt.Println("All clear!") - -// FlushTimeout specifies a timeout value as well. 
-err := nc.FlushTimeout(1*time.Second) -if err != nil { - fmt.Println("All clear!") -} else { - fmt.Println("Flushed timed out!") -} - -// Auto-unsubscribe after MAX_WANTED messages received -const MAX_WANTED = 10 -sub, err := nc.Subscribe("foo") -sub.AutoUnsubscribe(MAX_WANTED) - -// Multiple connections -nc1 := nats.Connect("nats://host1:4222") -nc2 := nats.Connect("nats://host2:4222") - -nc1.Subscribe("foo", func(m *Msg) { - fmt.Printf("Received a message: %s\n", string(m.Data)) -}) - -nc2.Publish("foo", []byte("Hello World!")); - -``` - -## Clustered Usage - -```go - -var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224" - -nc, err := nats.Connect(servers) - -// Optionally set ReconnectWait and MaxReconnect attempts. -// This example means 10 seconds total per backend. -nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second)) - -// You can also add some jitter for the reconnection. -// This call will add up to 500 milliseconds for non TLS connections and 2 seconds for TLS connections. -// If not specified, the library defaults to 100 milliseconds and 1 second, respectively. -nc, err = nats.Connect(servers, nats.ReconnectJitter(500*time.Millisecond, 2*time.Second)) - -// You can also specify a custom reconnect delay handler. If set, the library will invoke it when it has tried -// all URLs in its list. The value returned will be used as the total sleep time, so add your own jitter. -// The library will pass the number of times it went through the whole list. -nc, err = nats.Connect(servers, nats.CustomReconnectDelay(func(attempts int) time.Duration { - return someBackoffFunction(attempts) -})) - -// Optionally disable randomization of the server pool -nc, err = nats.Connect(servers, nats.DontRandomize()) - -// Setup callbacks to be notified on disconnects, reconnects and connection closed. 
-nc, err = nats.Connect(servers, - nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { - fmt.Printf("Got disconnected! Reason: %q\n", err) - }), - nats.ReconnectHandler(func(nc *nats.Conn) { - fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl()) - }), - nats.ClosedHandler(func(nc *nats.Conn) { - fmt.Printf("Connection closed. Reason: %q\n", nc.LastError()) - }) -) - -// When connecting to a mesh of servers with auto-discovery capabilities, -// you may need to provide a username/password or token in order to connect -// to any server in that mesh when authentication is required. -// Instead of providing the credentials in the initial URL, you will use -// new option setters: -nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar")) - -// For token based authentication: -nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken")) - -// You can even pass the two at the same time in case one of the server -// in the mesh requires token instead of user name and password. -nc, err = nats.Connect("nats://localhost:4222", - nats.UserInfo("foo", "bar"), - nats.Token("S3cretT0ken")) - -// Note that if credentials are specified in the initial URLs, they take -// precedence on the credentials specified through the options. -// For instance, in the connect call below, the client library will use -// the user "my" and password "pwd" to connect to localhost:4222, however, -// it will use username "foo" and password "bar" when (re)connecting to -// a different server URL that it got as part of the auto-discovery. 
-nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar")) - -``` - -## Context support (+Go 1.7) - -```go -ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) -defer cancel() - -nc, err := nats.Connect(nats.DefaultURL) - -// Request with context -msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar")) - -// Synchronous subscriber with context -sub, err := nc.SubscribeSync("foo") -msg, err := sub.NextMsgWithContext(ctx) - -// Encoded Request with context -c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) -type request struct { - Message string `json:"message"` -} -type response struct { - Code int `json:"code"` -} -req := &request{Message: "Hello"} -resp := &response{} -err := c.RequestWithContext(ctx, "foo", req, resp) -``` - -## License - -Unless otherwise noted, the NATS source files are distributed -under the Apache Version 2.0 license found in the LICENSE file. - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats?ref=badge_large) diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/context.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/context.go deleted file mode 100644 index 20f1782..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/context.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2016-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "context" - "reflect" -) - -// RequestMsgWithContext takes a context, a subject and payload -// in bytes and request expecting a single response. -func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) { - if msg == nil { - return nil, ErrInvalidMsg - } - hdr, err := msg.headerBytes() - if err != nil { - return nil, err - } - return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data) -} - -// RequestWithContext takes a context, a subject and payload -// in bytes and request expecting a single response. -func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { - return nc.requestWithContext(ctx, subj, nil, data) -} - -func (nc *Conn) requestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) { - if ctx == nil { - return nil, ErrInvalidContext - } - if nc == nil { - return nil, ErrInvalidConnection - } - // Check whether the context is done already before making - // the request. - if ctx.Err() != nil { - return nil, ctx.Err() - } - - var m *Msg - var err error - - // If user wants the old style. - if nc.useOldRequestStyle() { - m, err = nc.oldRequestWithContext(ctx, subj, hdr, data) - } else { - mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) - if err != nil { - return nil, err - } - - var ok bool - - select { - case m, ok = <-mch: - if !ok { - return nil, ErrConnectionClosed - } - case <-ctx.Done(): - nc.mu.Lock() - delete(nc.respMap, token) - nc.mu.Unlock() - return nil, ctx.Err() - } - } - // Check for no responder status. - if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { - m, err = nil, ErrNoResponders - } - return m, err -} - -// oldRequestWithContext utilizes inbox and subscription per request. 
-func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) { - inbox := nc.NewInbox() - ch := make(chan *Msg, RequestChanLen) - - s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil) - if err != nil { - return nil, err - } - s.AutoUnsubscribe(1) - defer s.Unsubscribe() - - err = nc.publish(subj, inbox, hdr, data) - if err != nil { - return nil, err - } - - return s.NextMsgWithContext(ctx) -} - -func (s *Subscription) nextMsgWithContext(ctx context.Context, pullSubInternal, waitIfNoMsg bool) (*Msg, error) { - if ctx == nil { - return nil, ErrInvalidContext - } - if s == nil { - return nil, ErrBadSubscription - } - if ctx.Err() != nil { - return nil, ctx.Err() - } - - s.mu.Lock() - err := s.validateNextMsgState(pullSubInternal) - if err != nil { - s.mu.Unlock() - return nil, err - } - - // snapshot - mch := s.mch - s.mu.Unlock() - - var ok bool - var msg *Msg - - // If something is available right away, let's optimize that case. - select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } - return msg, nil - default: - // If internal and we don't want to wait, signal that there is no - // message in the internal queue. - if pullSubInternal && !waitIfNoMsg { - return nil, errNoMessages - } - } - - select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } - case <-ctx.Done(): - return nil, ctx.Err() - } - - return msg, nil -} - -// NextMsgWithContext takes a context and returns the next message -// available to a synchronous subscriber, blocking until it is delivered -// or context gets canceled. -func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) { - return s.nextMsgWithContext(ctx, false, true) -} - -// FlushWithContext will allow a context to control the duration -// of a Flush() call. 
This context should be non-nil and should -// have a deadline set. We will return an error if none is present. -func (nc *Conn) FlushWithContext(ctx context.Context) error { - if nc == nil { - return ErrInvalidConnection - } - if ctx == nil { - return ErrInvalidContext - } - _, ok := ctx.Deadline() - if !ok { - return ErrNoDeadlineContext - } - - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - // Create a buffered channel to prevent chan send to block - // in processPong() - ch := make(chan struct{}, 1) - nc.sendPing(ch) - nc.mu.Unlock() - - var err error - - select { - case _, ok := <-ch: - if !ok { - err = ErrConnectionClosed - } else { - close(ch) - } - case <-ctx.Done(): - err = ctx.Err() - } - - if err != nil { - nc.removeFlushEntry(ch) - } - - return err -} - -// RequestWithContext will create an Inbox and perform a Request -// using the provided cancellation context with the Inbox reply -// for the data v. A response will be decoded into the vPtr last parameter. -func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v any, vPtr any) error { - if ctx == nil { - return ErrInvalidContext - } - - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - m, err := c.Conn.RequestWithContext(ctx, subject, b) - if err != nil { - return err - } - if reflect.TypeOf(vPtr) == emptyMsgType { - mPtr := vPtr.(*Msg) - *mPtr = *m - } else { - err := c.Enc.Decode(m.Subject, m.Data, vPtr) - if err != nil { - return err - } - } - - return nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/dependencies.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/dependencies.md deleted file mode 100644 index ec9ab3c..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/dependencies.md +++ /dev/null @@ -1,15 +0,0 @@ -# External Dependencies - -This file lists the dependencies used in this repository. 
- -| Dependency | License | -|-----------------------------------|--------------| -| Go | BSD 3-Clause | -| github.com/golang/protobuf/proto | BSD-3-Clause | -| github.com/klauspost/compress | BSD-3-Clause | -| github.com/nats-io/nats-server/v2 | Apache-2.0 | -| github.com/nats-io/nkeys | Apache-2.0 | -| github.com/nats-io/nuid | Apache-2.0 | -| go.uber.org/goleak | MIT | -| golang.org/x/text | BSD-3-Clause | -| google.golang.org/protobuf | BSD-3-Clause | diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/enc.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/enc.go deleted file mode 100644 index 4550f61..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/enc.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2012-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "errors" - "fmt" - "reflect" - "sync" - "time" - - // Default Encoders - "github.com/nats-io/nats.go/encoders/builtin" -) - -// Encoder interface is for all register encoders -type Encoder interface { - Encode(subject string, v any) ([]byte, error) - Decode(subject string, data []byte, vPtr any) error -} - -var encMap map[string]Encoder -var encLock sync.Mutex - -// Indexed names into the Registered Encoders. 
-const ( - JSON_ENCODER = "json" - GOB_ENCODER = "gob" - DEFAULT_ENCODER = "default" -) - -func init() { - encMap = make(map[string]Encoder) - // Register json, gob and default encoder - RegisterEncoder(JSON_ENCODER, &builtin.JsonEncoder{}) - RegisterEncoder(GOB_ENCODER, &builtin.GobEncoder{}) - RegisterEncoder(DEFAULT_ENCODER, &builtin.DefaultEncoder{}) -} - -// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to -// a nats server and have an extendable encoder system that will encode and decode messages -// from raw Go types. -type EncodedConn struct { - Conn *Conn - Enc Encoder -} - -// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered -// encoder. -func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { - if c == nil { - return nil, errors.New("nats: Nil Connection") - } - if c.IsClosed() { - return nil, ErrConnectionClosed - } - ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} - if ec.Enc == nil { - return nil, fmt.Errorf("no encoder registered for '%s'", encType) - } - return ec, nil -} - -// RegisterEncoder will register the encType with the given Encoder. Useful for customization. -func RegisterEncoder(encType string, enc Encoder) { - encLock.Lock() - defer encLock.Unlock() - encMap[encType] = enc -} - -// EncoderForType will return the registered Encoder for the encType. -func EncoderForType(encType string) Encoder { - encLock.Lock() - defer encLock.Unlock() - return encMap[encType] -} - -// Publish publishes the data argument to the given subject. The data argument -// will be encoded using the associated encoder. -func (c *EncodedConn) Publish(subject string, v any) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - return c.Conn.publish(subject, _EMPTY_, nil, b) -} - -// PublishRequest will perform a Publish() expecting a response on the -// reply subject. Use Request() for automatically waiting for a response -// inline. 
-func (c *EncodedConn) PublishRequest(subject, reply string, v any) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - return c.Conn.publish(subject, reply, nil, b) -} - -// Request will create an Inbox and perform a Request() call -// with the Inbox reply for the data v. A response will be -// decoded into the vPtr Response. -func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Duration) error { - b, err := c.Enc.Encode(subject, v) - if err != nil { - return err - } - m, err := c.Conn.Request(subject, b, timeout) - if err != nil { - return err - } - if reflect.TypeOf(vPtr) == emptyMsgType { - mPtr := vPtr.(*Msg) - *mPtr = *m - } else { - err = c.Enc.Decode(m.Subject, m.Data, vPtr) - } - return err -} - -// Handler is a specific callback used for Subscribe. It is generalized to -// an any, but we will discover its format and arguments at runtime -// and perform the correct callback, including demarshaling encoded data -// back into the appropriate struct based on the signature of the Handler. -// -// Handlers are expected to have one of four signatures. -// -// type person struct { -// Name string `json:"name,omitempty"` -// Age uint `json:"age,omitempty"` -// } -// -// handler := func(m *Msg) -// handler := func(p *person) -// handler := func(subject string, o *obj) -// handler := func(subject, reply string, o *obj) -// -// These forms allow a callback to request a raw Msg ptr, where the processing -// of the message from the wire is untouched. Process a JSON representation -// and demarshal it into the given struct, e.g. person. -// There are also variants where the callback wants either the subject, or the -// subject and the reply subject. 
-type Handler any - -// Dissect the cb Handler's signature -func argInfo(cb Handler) (reflect.Type, int) { - cbType := reflect.TypeOf(cb) - if cbType.Kind() != reflect.Func { - panic("nats: Handler needs to be a func") - } - numArgs := cbType.NumIn() - if numArgs == 0 { - return nil, numArgs - } - return cbType.In(numArgs - 1), numArgs -} - -var emptyMsgType = reflect.TypeOf(&Msg{}) - -// Subscribe will create a subscription on the given subject and process incoming -// messages using the specified Handler. The Handler should be a func that matches -// a signature from the description of Handler from above. -func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { - return c.subscribe(subject, _EMPTY_, cb) -} - -// QueueSubscribe will create a queue subscription on the given subject and process -// incoming messages using the specified Handler. The Handler should be a func that -// matches a signature from the description of Handler from above. -func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { - return c.subscribe(subject, queue, cb) -} - -// Internal implementation that all public functions will use. 
-func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { - if cb == nil { - return nil, errors.New("nats: Handler required for EncodedConn Subscription") - } - argType, numArgs := argInfo(cb) - if argType == nil { - return nil, errors.New("nats: Handler requires at least one argument") - } - - cbValue := reflect.ValueOf(cb) - wantsRaw := (argType == emptyMsgType) - - natsCB := func(m *Msg) { - var oV []reflect.Value - if wantsRaw { - oV = []reflect.Value{reflect.ValueOf(m)} - } else { - var oPtr reflect.Value - if argType.Kind() != reflect.Ptr { - oPtr = reflect.New(argType) - } else { - oPtr = reflect.New(argType.Elem()) - } - if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { - if c.Conn.Opts.AsyncErrorCB != nil { - c.Conn.ach.push(func() { - c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) - }) - } - return - } - if argType.Kind() != reflect.Ptr { - oPtr = reflect.Indirect(oPtr) - } - - // Callback Arity - switch numArgs { - case 1: - oV = []reflect.Value{oPtr} - case 2: - subV := reflect.ValueOf(m.Subject) - oV = []reflect.Value{subV, oPtr} - case 3: - subV := reflect.ValueOf(m.Subject) - replyV := reflect.ValueOf(m.Reply) - oV = []reflect.Value{subV, replyV, oPtr} - } - - } - cbValue.Call(oV) - } - - return c.Conn.subscribe(subject, queue, natsCB, nil, false, nil) -} - -// FlushTimeout allows a Flush operation to have an associated timeout. -func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { - return c.Conn.FlushTimeout(timeout) -} - -// Flush will perform a round trip to the server and return when it -// receives the internal reply. -func (c *EncodedConn) Flush() error { - return c.Conn.Flush() -} - -// Close will close the connection to the server. This call will release -// all blocking calls, such as Flush(), etc. -func (c *EncodedConn) Close() { - c.Conn.Close() -} - -// Drain will put a connection into a drain state. 
All subscriptions will -// immediately be put into a drain state. Upon completion, the publishers -// will be drained and can not publish any additional messages. Upon draining -// of the publishers, the connection will be closed. Use the ClosedCB() -// option to know when the connection has moved from draining to closed. -func (c *EncodedConn) Drain() error { - return c.Conn.Drain() -} - -// LastError reports the last error encountered via the Connection. -func (c *EncodedConn) LastError() error { - return c.Conn.LastError() -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go deleted file mode 100644 index c1d0f6f..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2012-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtin - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "unsafe" -) - -// DefaultEncoder implementation for EncodedConn. -// This encoder will leave []byte and string untouched, but will attempt to -// turn numbers into appropriate strings that can be decoded. It will also -// properly encoded and decode bools. If will encode a struct, but if you want -// to properly handle structures you should use JsonEncoder. 
-type DefaultEncoder struct { - // Empty -} - -var trueB = []byte("true") -var falseB = []byte("false") -var nilB = []byte("") - -// Encode -func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) { - switch arg := v.(type) { - case string: - bytes := *(*[]byte)(unsafe.Pointer(&arg)) - return bytes, nil - case []byte: - return arg, nil - case bool: - if arg { - return trueB, nil - } else { - return falseB, nil - } - case nil: - return nilB, nil - default: - var buf bytes.Buffer - fmt.Fprintf(&buf, "%+v", arg) - return buf.Bytes(), nil - } -} - -// Decode -func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr any) error { - // Figure out what it's pointing to... - sData := *(*string)(unsafe.Pointer(&data)) - switch arg := vPtr.(type) { - case *string: - *arg = sData - return nil - case *[]byte: - *arg = data - return nil - case *int: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int(n) - return nil - case *int32: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int32(n) - return nil - case *int64: - n, err := strconv.ParseInt(sData, 10, 64) - if err != nil { - return err - } - *arg = int64(n) - return nil - case *float32: - n, err := strconv.ParseFloat(sData, 32) - if err != nil { - return err - } - *arg = float32(n) - return nil - case *float64: - n, err := strconv.ParseFloat(sData, 64) - if err != nil { - return err - } - *arg = float64(n) - return nil - case *bool: - b, err := strconv.ParseBool(sData) - if err != nil { - return err - } - *arg = b - return nil - default: - vt := reflect.TypeOf(arg).Elem() - return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt) - } -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go deleted file mode 100644 index 7ecf85e..0000000 --- 
a/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2013-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtin - -import ( - "bytes" - "encoding/gob" -) - -// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn. -// This encoder will use the builtin encoding/gob to Marshal -// and Unmarshal most types, including structs. -type GobEncoder struct { - // Empty -} - -// FIXME(dlc) - This could probably be more efficient. 
- -// Encode -func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) { - b := new(bytes.Buffer) - enc := gob.NewEncoder(b) - if err := enc.Encode(v); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// Decode -func (ge *GobEncoder) Decode(subject string, data []byte, vPtr any) (err error) { - dec := gob.NewDecoder(bytes.NewBuffer(data)) - err = dec.Decode(vPtr) - return -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go deleted file mode 100644 index 0540d98..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2012-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtin - -import ( - "encoding/json" - "strings" -) - -// JsonEncoder is a JSON Encoder implementation for EncodedConn. -// This encoder will use the builtin encoding/json to Marshal -// and Unmarshal most types, including structs. 
-type JsonEncoder struct { - // Empty -} - -// Encode -func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - return b, nil -} - -// Decode -func (je *JsonEncoder) Decode(subject string, data []byte, vPtr any) (err error) { - switch arg := vPtr.(type) { - case *string: - // If they want a string and it is a JSON string, strip quotes - // This allows someone to send a struct but receive as a plain string - // This cast should be efficient for Go 1.3 and beyond. - str := string(data) - if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { - *arg = str[1 : len(str)-1] - } else { - *arg = str - } - case *[]byte: - *arg = data - default: - err = json.Unmarshal(data, arg) - } - return -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.mod b/backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.mod deleted file mode 100644 index 4e747bf..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.mod +++ /dev/null @@ -1,22 +0,0 @@ -module github.com/nats-io/nats.go - -go 1.19 - -require ( - github.com/golang/protobuf v1.4.2 - github.com/klauspost/compress v1.17.4 - github.com/nats-io/nats-server/v2 v2.10.9 - github.com/nats-io/nkeys v0.4.7 - github.com/nats-io/nuid v1.0.1 - go.uber.org/goleak v1.2.1 - golang.org/x/text v0.14.0 - google.golang.org/protobuf v1.23.0 -) - -require ( - github.com/minio/highwayhash v1.0.2 // indirect - github.com/nats-io/jwt/v2 v2.5.3 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect -) diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.sum b/backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.sum deleted file mode 100644 index e33016d..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/go_test.sum +++ /dev/null @@ -1,47 
+0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/nats-io/jwt/v2 v2.5.3 h1:/9SWvzc6hTfamcgXJ3uYRpgj+QuY2aLNqRiqrKcrpEo= -github.com/nats-io/jwt/v2 v2.5.3/go.mod h1:iysuPemFcc7p4IoYots3IuELSI4EDe9Y0bQMe+I3Bf4= -github.com/nats-io/nats-server/v2 v2.10.9 h1:VEW43Zz+p+9lARtiPM9ctd6ckun+92ZT2T17HWtwiFI= -github.com/nats-io/nats-server/v2 v2.10.9/go.mod h1:oorGiV9j3BOLLO3ejQe+U7pfAGyPo+ppD7rpgNF6KTQ= -github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= -github.com/nats-io/nkeys v0.4.7/go.mod 
h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= -github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf 
v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/internal/parser/parse.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/internal/parser/parse.go deleted file mode 100644 index 7eab8ad..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/internal/parser/parse.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2020-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "errors" - "fmt" -) - -const ( - AckDomainTokenPos = iota + 2 - AckAccHashTokenPos - AckStreamTokenPos - AckConsumerTokenPos - AckNumDeliveredTokenPos - AckStreamSeqTokenPos - AckConsumerSeqTokenPos - AckTimestampSeqTokenPos - AckNumPendingTokenPos -) - -var ErrInvalidSubjectFormat = errors.New("invalid format of ACK subject") - -// Quick parser for positive numbers in ack reply encoding. 
-// NOTE: This parser does not detect uint64 overflow -func ParseNum(d string) (n uint64) { - if len(d) == 0 { - return 0 - } - - // ASCII numbers 0-9 - const ( - asciiZero = 48 - asciiNine = 57 - ) - - for _, dec := range d { - if dec < asciiZero || dec > asciiNine { - return 0 - } - n = n*10 + uint64(dec) - asciiZero - } - return -} - -func GetMetadataFields(subject string) ([]string, error) { - v1TokenCounts, v2TokenCounts := 9, 12 - - var start int - tokens := make([]string, 0, v2TokenCounts) - for i := 0; i < len(subject); i++ { - if subject[i] == '.' { - tokens = append(tokens, subject[start:i]) - start = i + 1 - } - } - tokens = append(tokens, subject[start:]) - // - // Newer server will include the domain name and account hash in the subject, - // and a token at the end. - // - // Old subject was: - // $JS.ACK....... - // - // New subject would be: - // $JS.ACK.......... - // - // v1 has 9 tokens, v2 has 12, but we must not be strict on the 12th since - // it may be removed in the future. Also, the library has no use for it. - // The point is that a v2 ACK subject is valid if it has at least 11 tokens. - // - tokensLen := len(tokens) - // If lower than 9 or more than 9 but less than 11, report an error - if tokensLen < v1TokenCounts || (tokensLen > v1TokenCounts && tokensLen < v2TokenCounts-1) { - return nil, ErrInvalidSubjectFormat - } - if tokens[0] != "$JS" || tokens[1] != "ACK" { - return nil, fmt.Errorf("%w: subject should start with $JS.ACK", ErrInvalidSubjectFormat) - } - // For v1 style, we insert 2 empty tokens (domain and hash) so that the - // rest of the library references known fields at a constant location. - if tokensLen == v1TokenCounts { - // Extend the array (we know the backend is big enough) - tokens = append(tokens[:AckDomainTokenPos+2], tokens[AckDomainTokenPos:]...) 
- // Clear the domain and hash tokens - tokens[AckDomainTokenPos], tokens[AckAccHashTokenPos] = "", "" - - } else if tokens[AckDomainTokenPos] == "_" { - // If domain is "_", replace with empty value. - tokens[AckDomainTokenPos] = "" - } - return tokens, nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/README.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/README.md deleted file mode 100644 index e2ee01e..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/README.md +++ /dev/null @@ -1,989 +0,0 @@ -# JetStream Simplified Client - -This doc covers the basic usage of the `jetstream` package in `nats.go` client. - -- [JetStream Simplified Client](#jetstream-simplified-client) - - [Overview](#overview) - - [Basic usage](#basic-usage) - - [Streams](#streams) - - [Stream management (CRUD)](#stream-management-crud) - - [Listing streams and stream names](#listing-streams-and-stream-names) - - [Stream-specific operations](#stream-specific-operations) - - [Consumers](#consumers) - - [Consumers management](#consumers-management) - - [Listing consumers and consumer - names](#listing-consumers-and-consumer-names) - - [Ordered consumers](#ordered-consumers) - - [Receiving messages from the - consumer](#receiving-messages-from-the-consumer) - - [Single fetch](#single-fetch) - - [Continuous polling](#continuous-polling) - - [Using `Consume()` receive messages in a - callback](#using-consume-receive-messages-in-a-callback) - - [Using `Messages()` to iterate over incoming - messages](#using-messages-to-iterate-over-incoming-messages) - - [Publishing on stream](#publishing-on-stream) - - [Synchronous publish](#synchronous-publish) - - [Async publish](#async-publish) - - [KeyValue Store](#keyvalue-store) - - [Basic usage of KV bucket](#basic-usage-of-kv-bucket) - - [Watching for changes on a bucket](#watching-for-changes-on-a-bucket) - - [Additional operations on a 
bucket](#additional-operations-on-a-bucket) - - [Examples](#examples) - -## Overview - -`jetstream` package is a new client API to interact with NATS JetStream, aiming -to replace the JetStream client implementation from `nats` package. The main -goal of this package is to provide a simple and clear way to interact with -JetStream API. Key differences between `jetstream` and `nats` packages include: - -- Using smaller, simpler interfaces to manage streams and consumers -- Using more granular and predictable approach to consuming messages from a - stream, instead of relying on often complicated and unpredictable - `Subscribe()` method (and all of its flavors) -- Allowing the usage of pull consumers to continuously receive incoming messages - (including ordered consumer functionality) -- Separating JetStream context from core NATS - -`jetstream` package provides several ways of interacting with the API: - -- `JetStream` - top-level interface, used to create and manage streams, - consumers and publishing messages -- `Stream` - used to manage consumers for a specific stream, as well as - performing stream-specific operations (purging, fetching and deleting messages - by sequence number, fetching stream info) -- `Consumer` - used to get information about a consumer as well as consuming - messages -- `Msg` - used for message-specific operations - reading data, headers and - metadata, as well as performing various types of acknowledgements - -Additionally, `jetstream` exposes [KeyValue Store](#keyvalue-store) and -[ObjectStore](#object-store) capabilities. KV and Object stores are abstraction -layers on top of JetStream Streams, simplifying key value and large data -storage on Streams. - -> __NOTE__: `jetstream` requires nats-server >= 2.9.0 to work correctly. 
- -## Basic usage - -```go -package main - -import ( - "context" - "fmt" - "strconv" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nats.go/jetstream" -) - -func main() { - // In the `jetstream` package, almost all API calls rely on `context.Context` for timeout/cancellation handling - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - nc, _ := nats.Connect(nats.DefaultURL) - - // Create a JetStream management interface - js, _ := jetstream.New(nc) - - // Create a stream - s, _ := js.CreateStream(ctx, jetstream.StreamConfig{ - Name: "ORDERS", - Subjects: []string{"ORDERS.*"}, - }) - - // Publish some messages - for i := 0; i < 100; i++ { - js.Publish(ctx, "ORDERS.new", []byte("hello message "+strconv.Itoa(i))) - fmt.Printf("Published hello message %d\n", i) - } - - // Create durable consumer - c, _ := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ - Durable: "CONS", - AckPolicy: jetstream.AckExplicitPolicy, - }) - - // Get 10 messages from the consumer - messageCounter := 0 - msgs, _ := c.Fetch(10) - for msg := range msgs.Messages() { - msg.Ack() - fmt.Printf("Received a JetStream message via fetch: %s\n", string(msg.Data())) - messageCounter++ - } - fmt.Printf("received %d messages\n", messageCounter) - if msgs.Error() != nil { - fmt.Println("Error during Fetch(): ", msgs.Error()) - } - - // Receive messages continuously in a callback - cons, _ := c.Consume(func(msg jetstream.Msg) { - msg.Ack() - fmt.Printf("Received a JetStream message via callback: %s\n", string(msg.Data())) - messageCounter++ - }) - defer cons.Stop() - - // Iterate over messages continuously - it, _ := c.Messages() - for i := 0; i < 10; i++ { - msg, _ := it.Next() - msg.Ack() - fmt.Printf("Received a JetStream message via iterator: %s\n", string(msg.Data())) - messageCounter++ - } - it.Stop() - - // block until all 100 published messages have been processed - for messageCounter < 100 { - time.Sleep(10 * time.Millisecond) - } -} 
-``` - -## Streams - -`jetstream` provides methods to manage and list streams, as well as perform -stream-specific operations (purging, fetching/deleting messages by sequence id) - -### Stream management (CRUD) - -```go -js, _ := jetstream.New(nc) - -// create a stream (this is an idempotent operation) -s, _ := js.CreateStream(ctx, jetstream.StreamConfig{ - Name: "ORDERS", - Subjects: []string{"ORDERS.*"}, -}) - -// update a stream -s, _ = js.UpdateStream(ctx, jetstream.StreamConfig{ - Name: "ORDERS", - Subjects: []string{"ORDERS.*"}, - Description: "updated stream", -}) - -// get stream handle -s, _ = js.Stream(ctx, "ORDERS") - -// delete a stream -js.DeleteStream(ctx, "ORDERS") -``` - -### Listing streams and stream names - -```go -// list streams -streams := js.ListStreams(ctx) -for s := range streams.Info() { - fmt.Println(s.Config.Name) -} -if streams.Err() != nil { - fmt.Println("Unexpected error occurred") -} - -// list stream names -names := js.StreamNames(ctx) -for name := range names.Name() { - fmt.Println(name) -} -if names.Err() != nil { - fmt.Println("Unexpected error occurred") -} -``` - -### Stream-specific operations - -Using `Stream` interface, it is also possible to: - -- Purge a stream - -```go -// remove all messages from a stream -_ = s.Purge(ctx) - -// remove all messages from a stream that are stored on a specific subject -_ = s.Purge(ctx, jetstream.WithPurgeSubject("ORDERS.new")) - -// remove all messages up to specified sequence number -_ = s.Purge(ctx, jetstream.WithPurgeSequence(100)) - -// remove messages, but keep 10 newest -_ = s.Purge(ctx, jetstream.WithPurgeKeep(10)) -``` - -- Get and messages from stream - -```go -// get message from stream with sequence number == 100 -msg, _ := s.GetMsg(ctx, 100) - -// get last message from "ORDERS.new" subject -msg, _ = s.GetLastMsgForSubject(ctx, "ORDERS.new") - -// delete a message with sequence number == 100 -_ = s.DeleteMsg(ctx, 100) -``` - -- Get information about a stream - -```go -// 
Fetches latest stream info from server -info, _ := s.Info(ctx) -fmt.Println(info.Config.Name) - -// Returns the most recently fetched StreamInfo, without making an API call to the server -cachedInfo := s.CachedInfo() -fmt.Println(cachedInfo.Config.Name) -``` - -## Consumers - -Only pull consumers are supported in `jetstream` package. However, unlike the -JetStream API in `nats` package, pull consumers allow for continuous message -retrieval (similarly to how `nats.Subscribe()` works). Because of that, push -consumers can be easily replaced by pull consumers for most of the use cases. - -### Consumers management - -CRUD operations on consumers can be achieved on 2 levels: - -- on `JetStream` interface - -```go -js, _ := jetstream.New(nc) - -// create a consumer (this is an idempotent operation) -// an error will be returned if consumer already exists and has different configuration. -cons, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ - Durable: "foo", - AckPolicy: jetstream.AckExplicitPolicy, -}) - -// create an ephemeral pull consumer by not providing `Durable` -ephemeral, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ - AckPolicy: jetstream.AckExplicitPolicy, -}) - - -// consumer can also be created using CreateOrUpdateConsumer -// this method will either create a consumer if it does not exist -// or update existing consumer (if possible) -cons2 := js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ - Name: "bar", -}) - -// consumers can be updated -// an error will be returned if consumer with given name does not exist -// or an illegal property is to be updated (e.g. 
AckPolicy) -updated, _ := js.UpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ - AckPolicy: jetstream.AckExplicitPolicy, - Description: "updated consumer" -}) - -// get consumer handle -cons, _ = js.Consumer(ctx, "ORDERS", "foo") - -// delete a consumer -js.DeleteConsumer(ctx, "ORDERS", "foo") -``` - -- on `Stream` interface - -```go -// Create a JetStream management interface -js, _ := jetstream.New(nc) - -// get stream handle -stream, _ := js.Stream(ctx, "ORDERS") - -// create consumer -cons, _ := stream.CreateConsumer(ctx, jetstream.ConsumerConfig{ - Durable: "foo", - AckPolicy: jetstream.AckExplicitPolicy, -}) - -// get consumer handle -cons, _ = stream.Consumer(ctx, "ORDERS", "foo") - -// delete a consumer -stream.DeleteConsumer(ctx, "foo") -``` - -`Consumer` interface, returned when creating/fetching consumers, allows fetching -`ConsumerInfo`: - -```go -// Fetches latest consumer info from server -info, _ := cons.Info(ctx) -fmt.Println(info.Config.Durable) - -// Returns the most recently fetched ConsumerInfo, without making an API call to the server -cachedInfo := cons.CachedInfo() -fmt.Println(cachedInfo.Config.Durable) -``` - -### Listing consumers and consumer names - -```go -// list consumers -consumers := s.ListConsumers(ctx) -for cons := range consumers.Info() { - fmt.Println(cons.Name) -} -if consumers.Err() != nil { - fmt.Println("Unexpected error occurred") -} - -// list consumer names -names := s.ConsumerNames(ctx) -for name := range names.Name() { - fmt.Println(name) -} -if names.Err() != nil { - fmt.Println("Unexpected error occurred") -} -``` - -### Ordered consumers - -`jetstream`, in addition to basic named/ephemeral consumers, supports ordered -consumer functionality. Ordered is strictly processing messages in the order -that they were stored on the stream, providing a consistent and deterministic -message ordering. It is also resilient to consumer deletion. 
- -Ordered consumers present the same set of message consumption methods as -standard pull consumers. - -```go -js, _ := jetstream.New(nc) - -// create a consumer (this is an idempotent operation) -cons, _ := js.OrderedConsumer(ctx, "ORDERS", jetstream.OrderedConsumerConfig{ - // Filter results from "ORDERS" stream by specific subject - FilterSubjects: []{"ORDERS.A"}, -}) -``` - -### Receiving messages from the consumer - -The `Consumer` interface covers allows fetching messages on demand, with -pre-defined batch size on bytes limit, or continuous push-like receiving of -messages. - -#### __Single fetch__ - -This pattern pattern allows fetching a defined number of messages in a single -RPC. - -- Using `Fetch` or `FetchBytes`, consumer will return up to the provided number -of messages/bytes. By default, `Fetch()` will wait 30 seconds before timing out -(this behavior can be configured using `FetchMaxWait()` option): - -```go -// receive up to 10 messages from the stream -msgs, _ := c.Fetch(10) -for msg := range msgs.Messages() { - fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) -} -if msgs.Error() != nil { - // handle error -} - -// receive up to 1024 B of data -msgs, _ := c.FetchBytes(1024) -for msg := range msgs.Messages() { - fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) -} -if msgs.Error() != nil { - // handle error -} -``` - -Similarly, `FetchNoWait()` can be used in order to only return messages from the -stream available at the time of sending request: - -```go -// FetchNoWait will not wait for new messages if the whole batch is not available at the time of sending request. 
-msgs, _ := c.FetchNoWait(10) -for msg := range msgs.Messages() { - fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) -} -if msgs.Error() != nil { - // handle error -} -``` - -> __Warning__: Both `Fetch()` and `FetchNoWait()` have worse performance when -> used to continuously retrieve messages in comparison to `Messages()` or -`Consume()` methods, as they do not perform any optimizations (pre-buffering) -and new subscription is created for each execution. - -#### Continuous polling - -There are 2 ways to achieve push-like behavior using pull consumers in -`jetstream` package. Both `Messages()` and `Consume()` methods perform similar optimizations -and for most cases can be used interchangeably. - -There is an advantage of using `Messages()` instead of `Consume()` for work-queue scenarios, -where messages should be fetched one by one, as it allows for finer control over fetching -single messages on demand. - -Subject filtering is achieved by configuring a consumer with a `FilterSubject` -value. - -##### Using `Consume()` receive messages in a callback - -```go -cons, _ := js.CreateOrUpdateConsumer("ORDERS", jetstream.ConsumerConfig{ - AckPolicy: jetstream.AckExplicitPolicy, - // receive messages from ORDERS.A subject only - FilterSubject: "ORDERS.A" -})) - -consContext, _ := c.Consume(func(msg jetstream.Msg) { - fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) -}) -defer consContext.Stop() -``` - -Similarly to `Messages()`, `Consume()` can be supplied with options to modify -the behavior of a single pull request: - -- `PullMaxMessages(int)` - up to provided number of messages will be buffered -- `PullMaxBytes(int)` - up to provided number of bytes will be buffered. 
This -setting and `PullMaxMessages` are mutually exclusive -- `PullExpiry(time.Duration)` - timeout on a single pull request to the server -type PullThresholdMessages int -- `PullThresholdMessages(int)` - amount of messages which triggers refilling the - buffer -- `PullThresholdBytes(int)` - amount of bytes which triggers refilling the - buffer -- `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull -request. An error will be triggered if at least 2 heartbeats are missed -- `WithConsumeErrHandler(func (ConsumeContext, error))` - when used, sets a - custom error handler on `Consume()`, allowing e.g. tracking missing - heartbeats. - -> __NOTE__: `Stop()` should always be called on `ConsumeContext` to avoid -> leaking goroutines. - -##### Using `Messages()` to iterate over incoming messages - -```go -iter, _ := cons.Messages() -for { - msg, err := iter.Next() - // Next can return error, e.g. when iterator is closed or no heartbeats were received - if err != nil { - //handle error - } - fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) - msg.Ack() -} -iter.Stop() -``` - -It can also be configured to only store up to defined number of messages/bytes -in the buffer. - -```go -// a maximum of 10 messages or 1024 bytes will be stored in memory (whichever is encountered first) -iter, _ := cons.Messages(jetstream.PullMaxMessages(10), jetstream.PullMaxBytes(1024)) -``` - -`Messages()` exposes the following options: - -- `PullMaxMessages(int)` - up to provided number of messages will be buffered -- `PullMaxBytes(int)` - up to provided number of bytes will be buffered. 
This -setting and `PullMaxMessages` are mutually exclusive -- `PullExpiry(time.Duration)` - timeout on a single pull request to the server -type PullThresholdMessages int -- `PullThresholdMessages(int)` - amount of messages which triggers refilling the - buffer -- `PullThresholdBytes(int)` - amount of bytes which triggers refilling the - buffer -- `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull -request. An error will be triggered if at least 2 heartbeats are missed (unless -`WithMessagesErrOnMissingHeartbeat(false)` is used) - -##### Using `Messages()` to fetch single messages one by one - -When implementing work queue, it is possible to use `Messages()` in order to -fetch messages from the server one-by-one, without optimizations and -pre-buffering (to avoid redeliveries when processing messages at slow rate). - -```go -// PullMaxMessages determines how many messages will be sent to the client in a single pull request -iter, _ := cons.Messages(jetstream.PullMaxMessages(1)) -numWorkers := 5 -sem := make(chan struct{}, numWorkers) -for { - sem <- struct{}{} - go func() { - defer func() { - <-sem - }() - msg, err := iter.Next() - if err != nil { - // handle err - } - fmt.Printf("Processing msg: %s\n", string(msg.Data())) - doWork() - msg.Ack() - }() -} -``` - -## Publishing on stream - -`JetStream` interface allows publishing messages on stream in 2 ways: - -### __Synchronous publish__ - -```go -js, _ := jetstream.New(nc) - -// Publish message on subject ORDERS.new -// Given subject has to belong to a stream -ack, err := js.PublishMsg(ctx, &nats.Msg{ - Data: []byte("hello"), - Subject: "ORDERS.new", -}) -fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream) - -// A helper method accepting subject and data as parameters -ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello")) -``` - -Both `Publish()` and `PublishMsg()` can be supplied with options allowing -setting various headers. 
Additionally, for `PublishMsg()` headers can be set -directly on `nats.Msg`. - -```go -// All 3 implementations are work identically -ack, err := js.PublishMsg(ctx, &nats.Msg{ - Data: []byte("hello"), - Subject: "ORDERS.new", - Header: nats.Header{ - "Nats-Msg-Id": []string{"id"}, - }, -}) - -ack, err = js.PublishMsg(ctx, &nats.Msg{ - Data: []byte("hello"), - Subject: "ORDERS.new", -}, jetstream.WithMsgID("id")) - -ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello"), jetstream.WithMsgID("id")) -``` - -### __Async publish__ - -```go -js, _ := jetstream.New(nc) - -// publish message and do not wait for ack -ackF, err := js.PublishMsgAsync(ctx, &nats.Msg{ - Data: []byte("hello"), - Subject: "ORDERS.new", -}) - -// block and wait for ack -select { -case ack := <-ackF.Ok(): - fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream) -case err := <-ackF.Err(): - fmt.Println(err) -} - -// similarly to synchronous publish, there is a helper method accepting subject and data -ackF, err = js.PublishAsync("ORDERS.new", []byte("hello")) -``` - -Just as for synchronous publish, `PublishAsync()` and `PublishMsgAsync()` accept -options for setting headers. - -## KeyValue Store - -JetStream KeyValue Stores offer a straightforward method for storing key-value -pairs within JetStream. These stores are supported by a specially configured -stream, designed to efficiently and compactly store these pairs. This structure -ensures rapid and convenient access to the data. 
- -The KV Store, also known as a bucket, enables the execution of various operations: - -- create/update a value for a given key -- get a value for a given key -- delete a value for a given key -- purge all values from a bucket -- list all keys in a bucket -- watch for changes on given key set or the whole bucket -- retrieve history of changes for a given key - -### Basic usage of KV bucket - -The most basic usage of KV bucket is to create or retrieve a bucket and perform -basic CRUD operations on keys. - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() - -// Create a new bucket. Bucket name is required and has to be unique within a JetStream account. -kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) - -// Set a value for a given key -// Put will either create or update a value for a given key -kv.Put(ctx, "sue.color", []byte("blue")) - -// Get an entry for a given key -// Entry contains key/value, but also metadata (revision, timestamp, etc.)) -entry, _ := kv.Get(ctx, "sue.color") - -// Prints `sue.color @ 1 -> "blue"` -fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) - -// Update a value for a given key -// Update will fail if the key does not exist or the revision has changed -kv.Update(ctx, "sue.color", []byte("red"), 1) - -// Create will fail if the key already exists -_, err := kv.Create(ctx, "sue.color", []byte("purple")) -fmt.Println(err) // prints `nats: key exists` - -// Delete a value for a given key. 
-// Delete is not destructive, it will add a delete marker for a given key -// and all previous revisions will still be available -kv.Delete(ctx, "sue.color") - -// getting a deleted key will return an error -_, err = kv.Get(ctx, "sue.color") -fmt.Println(err) // prints `nats: key not found` - -// A bucket can be deleted once it is no longer needed -js.DeleteKeyValue(ctx, "profiles") -``` - -### Watching for changes on a bucket - -KV buckets support Watchers, which can be used to watch for changes on a given -key or the whole bucket. Watcher will receive a notification on a channel when a -change occurs. By default, watcher will return initial values for all matching -keys. After sending all initial values, watcher will send nil on the channel to -signal that all initial values have been sent and it will start sending updates when -changes occur. - -Watcher supports several configuration options: - -- `IncludeHistory` will have the key watcher send all historical values -for each key (up to KeyValueMaxHistory). -- `IgnoreDeletes` will have the key watcher not pass any keys with -delete markers. -- `UpdatesOnly` will have the key watcher only pass updates on values -(without values already present when starting). -- `MetaOnly` will have the key watcher retrieve only the entry metadata, not the entry value. -- `ResumeFromRevision` instructs the key watcher to resume from a -specific revision number. - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) - -kv.Put(ctx, "sue.color", []byte("blue")) - -// A watcher can be created to watch for changes on a given key or the whole bucket -// By default, watcher will return most recent values for all matching keys. -// Watcher can be configured to only return updates by using jetstream.UpdatesOnly() option. 
-watcher, _ := kv.Watch(ctx, "sue.*") -defer watcher.Stop() - -kv.Put(ctx, "sue.age", []byte("43")) -kv.Put(ctx, "sue.color", []byte("red")) - -// First, the watcher sends most recent values for all matching keys. -// In this case, it will send a single entry for `sue.color`. -entry := <-watcher.Updates() -// Prints `sue.color @ 1 -> "blue"` -fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) - -// After all current values have been sent, watcher will send nil on the channel. -entry = <-watcher.Updates() -if entry != nil { - fmt.Println("Unexpected entry received") -} - -// After that, watcher will send updates when changes occur -// In this case, it will send an entry for `sue.color` and `sue.age`. - -entry = <-watcher.Updates() -// Prints `sue.age @ 2 -> "43"` -fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) - -entry = <-watcher.Updates() -// Prints `sue.color @ 3 -> "red"` -fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) -``` - -### Additional operations on a bucket - -In addition to basic CRUD operations and watching for changes, KV buckets -support several additional operations: - -- `ListKeys` will return all keys in a bucket" - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) - -kv.Put(ctx, "sue.color", []byte("blue")) -kv.Put(ctx, "sue.age", []byte("43")) -kv.Put(ctx, "bucket", []byte("profiles")) - -keys, _ := kv.ListKeys(ctx) - -// Prints all 3 keys -for key := range keys.Keys() { - fmt.Println(key) -} -``` - -- `Purge` and `PurgeDeletes` for removing all keys from a bucket - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) - -kv.Put(ctx, "sue.color", []byte("blue")) -kv.Put(ctx, "sue.age", []byte("43")) -kv.Put(ctx, "bucket", []byte("profiles")) - -// Purge will 
remove all keys from a bucket. -// The latest revision of each key will be kept -// with a delete marker, all previous revisions will be removed -// permanently. -kv.Purge(ctx) - -// PurgeDeletes will remove all keys from a bucket -// with a delete marker. -kv.PurgeDeletes(ctx) -``` - -- `Status` will return the current status of a bucket - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) - -kv.Put(ctx, "sue.color", []byte("blue")) -kv.Put(ctx, "sue.age", []byte("43")) -kv.Put(ctx, "bucket", []byte("profiles")) - -status, _ := kv.Status(ctx) - -fmt.Println(status.Bucket()) // prints `profiles` -fmt.Println(status.Values()) // prints `3` -fmt.Println(status.Bytes()) // prints the size of all values in bytes -``` - -## Object Store - -JetStream Object Stores offer a straightforward method for storing large objects -within JetStream. These stores are backed by a specially configured streams, -designed to efficiently and compactly store these objects. - -The Object Store, also known as a bucket, enables the execution of various -operations: - -- create/update an object -- get an object -- delete an object -- list all objects in a bucket -- watch for changes on objects in a bucket -- create links to other objects or other buckets - -### Basic usage of Object Store - -The most basic usage of Object bucket is to create or retrieve a bucket and -perform basic CRUD operations on objects. - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() - -// Create a new bucket. Bucket name is required and has to be unique within a JetStream account. -os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) - -config1 := bytes.NewBufferString("first config") -// Put an object in a bucket. Put expects an object metadata and a reader -// to read the object data from. 
-os.Put(ctx, jetstream.ObjectMeta{Name: "config-1"}, config1) - -// Objects can also be created using various helper methods - -// 1. As raw strings -os.PutString(ctx, "config-2", "second config") - -// 2. As raw bytes -os.PutBytes(ctx, "config-3", []byte("third config")) - -// 3. As a file -os.PutFile(ctx, "config-4.txt") - -// Get an object -// Get returns a reader and object info -// Similar to Put, Get can also be used with helper methods -// to retrieve object data as a string, bytes or to save it to a file -object, _ := os.Get(ctx, "config-1") -data, _ := io.ReadAll(object) -info, _ := object.Info() - -// Prints `configs.config-1 -> "first config"` -fmt.Printf("%s.%s -> %q\n", info.Bucket, info.Name, string(data)) - -// Delete an object. -// Delete will remove object data from stream, but object metadata will be kept -// with a delete marker. -os.Delete(ctx, "config-1") - -// getting a deleted object will return an error -_, err := os.Get(ctx, "config-1") -fmt.Println(err) // prints `nats: object not found` - -// A bucket can be deleted once it is no longer needed -js.DeleteObjectStore(ctx, "configs") -``` - -### Watching for changes on a store - -Object Stores support Watchers, which can be used to watch for changes on -objects in a given bucket. Watcher will receive a notification on a channel when -a change occurs. By default, watcher will return latest information for all -objects in a bucket. After sending all initial values, watcher will send nil on -the channel to signal that all initial values have been sent and it will start -sending updates when changes occur. - ->__NOTE:__ Watchers do not retrieve values for objects, only metadata (containing ->information such as object name, bucket name, object size etc.). If object data ->is required, `Get` method should be used. - -Watcher supports several configuration options: - -- `IncludeHistory` will have the watcher send historical updates for each - object. 
-- `IgnoreDeletes` will have the watcher not pass any objects with delete - markers. -- `UpdatesOnly` will have the watcher only pass updates on objects (without - objects already present when starting). - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) - -os.PutString(ctx, "config-1", "first config") - -// By default, watcher will return most recent values for all objects in a bucket. -// Watcher can be configured to only return updates by using jetstream.UpdatesOnly() option. -watcher, _ := os.Watch(ctx) -defer watcher.Stop() - -// create a second object -os.PutString(ctx, "config-2", "second config") - -// update metadata of the first object -os.UpdateMeta(ctx, "config-1", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"}) - -// First, the watcher sends most recent values for all matching objects. -// In this case, it will send a single entry for `config-1`. -object := <-watcher.Updates() -// Prints `configs.config-1 -> ""` -fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) - -// After all current values have been sent, watcher will send nil on the channel. -object = <-watcher.Updates() -if object != nil { - fmt.Println("Unexpected object received") -} - -// After that, watcher will send updates when changes occur -// In this case, it will send an entry for `config-2` and `config-1`. 
-object = <-watcher.Updates() -// Prints `configs.config-2 -> ""` -fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) - -object = <-watcher.Updates() -// Prints `configs.config-1 -> "updated config"` -fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) -``` - -### Additional operations on a store - -In addition to basic CRUD operations and watching for changes, Object Stores -support several additional operations: - -- `UpdateMeta` for updating object metadata, such as name, description, etc. - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) - -os.PutString(ctx, "config", "data") - -// update metadata of the object to e.g. add a description -os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config", Description: "this is a config"}) - -// object can be moved under a new name (unless it already exists) -os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"}) -``` - -- `List` for listing information about all objects in a bucket: - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) - -os.PutString(ctx, "config-1", "cfg1") -os.PutString(ctx, "config-2", "cfg1") -os.PutString(ctx, "config-3", "cfg1") - -// List will return information about all objects in a bucket -objects, _ := os.List(ctx) - -// Prints all 3 objects -for _, object := range objects { - fmt.Println(object.Name) -} -``` - -- `Status` will return the current status of a bucket - -```go -js, _ := jetstream.New(nc) -ctx := context.Background() -os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) - -os.PutString(ctx, "config-1", "cfg1") -os.PutString(ctx, "config-2", "cfg1") -os.PutString(ctx, "config-3", "cfg1") - -status, _ := os.Status(ctx) - -fmt.Println(status.Bucket()) // prints 
`configs` -fmt.Println(status.Size()) // prints the size of the bucket in bytes -``` - -## Examples - -You can find more examples of `jetstream` usage [here](https://github.com/nats-io/nats.go/tree/main/examples/jetstream). diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/api.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/api.go deleted file mode 100644 index 1cea088..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/api.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2022-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "context" - "encoding/json" - "strings" -) - -type ( - apiResponse struct { - Type string `json:"type"` - Error *APIError `json:"error,omitempty"` - } - - // apiPaged includes variables used to create paged responses from the JSON API - apiPaged struct { - Total int `json:"total"` - Offset int `json:"offset"` - Limit int `json:"limit"` - } -) - -// Request API subjects for JetStream. -const ( - // DefaultAPIPrefix is the default prefix for the JetStream API. - DefaultAPIPrefix = "$JS.API." - - // jsDomainT is used to create JetStream API prefix by specifying only Domain - jsDomainT = "$JS.%s.API." - - // jsExtDomainT is used to create a StreamSource External APIPrefix - jsExtDomainT = "$JS.%s.API" - - // apiAccountInfo is for obtaining general information about JetStream. 
- apiAccountInfo = "INFO" - - // apiConsumerCreateT is used to create consumers. - apiConsumerCreateT = "CONSUMER.CREATE.%s.%s" - - // apiConsumerCreateT is used to create consumers. - // it accepts stream name, consumer name and filter subject - apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s" - - // apiConsumerInfoT is used to create consumers. - apiConsumerInfoT = "CONSUMER.INFO.%s.%s" - - // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. - apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s" - - // apiConsumerDeleteT is used to delete consumers. - apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s" - - // apiConsumerListT is used to return all detailed consumer information - apiConsumerListT = "CONSUMER.LIST.%s" - - // apiConsumerNamesT is used to return a list with all consumer names for the stream. - apiConsumerNamesT = "CONSUMER.NAMES.%s" - - // apiStreams can lookup a stream by subject. - apiStreams = "STREAM.NAMES" - - // apiStreamCreateT is the endpoint to create new streams. - apiStreamCreateT = "STREAM.CREATE.%s" - - // apiStreamInfoT is the endpoint to get information on a stream. - apiStreamInfoT = "STREAM.INFO.%s" - - // apiStreamUpdateT is the endpoint to update existing streams. - apiStreamUpdateT = "STREAM.UPDATE.%s" - - // apiStreamDeleteT is the endpoint to delete streams. - apiStreamDeleteT = "STREAM.DELETE.%s" - - // apiStreamPurgeT is the endpoint to purge streams. - apiStreamPurgeT = "STREAM.PURGE.%s" - - // apiStreamListT is the endpoint that will return all detailed stream information - apiStreamListT = "STREAM.LIST" - - // apiMsgGetT is the endpoint to get a message. - apiMsgGetT = "STREAM.MSG.GET.%s" - - // apiMsgGetT is the endpoint to perform a direct get of a message. - apiDirectMsgGetT = "DIRECT.GET.%s" - - // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject. 
- apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s" - - // apiMsgDeleteT is the endpoint to remove a message. - apiMsgDeleteT = "STREAM.MSG.DELETE.%s" -) - -func (js *jetStream) apiRequestJSON(ctx context.Context, subject string, resp any, data ...[]byte) (*jetStreamMsg, error) { - jsMsg, err := js.apiRequest(ctx, subject, data...) - if err != nil { - return nil, err - } - if err := json.Unmarshal(jsMsg.Data(), resp); err != nil { - return nil, err - } - return jsMsg, nil -} - -// a RequestWithContext with tracing via TraceCB -func (js *jetStream) apiRequest(ctx context.Context, subj string, data ...[]byte) (*jetStreamMsg, error) { - var req []byte - if len(data) > 0 { - req = data[0] - } - if js.clientTrace != nil { - ctrace := js.clientTrace - if ctrace.RequestSent != nil { - ctrace.RequestSent(subj, req) - } - } - resp, err := js.conn.RequestWithContext(ctx, subj, req) - if err != nil { - return nil, err - } - if js.clientTrace != nil { - ctrace := js.clientTrace - if ctrace.ResponseReceived != nil { - ctrace.ResponseReceived(subj, resp.Data, resp.Header) - } - } - - return js.toJSMsg(resp), nil -} - -func apiSubj(prefix, subject string) string { - if prefix == "" { - return subject - } - var b strings.Builder - b.WriteString(prefix) - b.WriteString(subject) - return b.String() -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer.go deleted file mode 100644 index 7792ced..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "strings" - - "github.com/nats-io/nuid" -) - -type ( - - // Consumer contains methods for fetching/processing messages from a stream, - // as well as fetching consumer info. - // - // This package provides two implementations of Consumer interface: - // - // - Standard named/ephemeral pull consumers. These consumers are created using - // CreateConsumer method on Stream or JetStream interface. They can be - // explicitly configured (using [ConsumerConfig]) and managed by the user, - // either from this package or externally. - // - // - Ordered consumers. These consumers are created using OrderedConsumer - // method on Stream or JetStream interface. They are managed by the library - // and provide a simple way to consume messages from a stream. Ordered - // consumers are ephemeral in-memory pull consumers and are resilient to - // deletes and restarts. They provide limited configuration options - // using [OrderedConsumerConfig]. - // - // Consumer provides method for optimized continuous consumption of messages - // using Consume and Messages methods, as well as simple one-off messages - // retrieval using Fetch and Next methods. - Consumer interface { - // Fetch is used to retrieve up to a provided number of messages from a - // stream. This method will send a single request and deliver either all - // requested messages unless time out is met earlier. Fetch timeout - // defaults to 30 seconds and can be configured using FetchMaxWait - // option. 
- // - // By default, Fetch uses a 5s idle heartbeat for requests longer than - // 10 seconds. For shorter requests, the idle heartbeat is disabled. - // This can be configured using FetchHeartbeat option. If a client does - // not receive a heartbeat message from a stream for more than 2 times - // the idle heartbeat setting, Fetch will return [ErrNoHeartbeat]. - // - // Fetch is non-blocking and returns MessageBatch, exposing a channel - // for delivered messages. - // - // Messages channel is always closed, thus it is safe to range over it - // without additional checks. - Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) - - // FetchBytes is used to retrieve up to a provided bytes from the - // stream. This method will send a single request and deliver the - // provided number of bytes unless time out is met earlier. FetchBytes - // timeout defaults to 30 seconds and can be configured using - // FetchMaxWait option. - // - // By default, FetchBytes uses a 5s idle heartbeat for requests longer than - // 10 seconds. For shorter requests, the idle heartbeat is disabled. - // This can be configured using FetchHeartbeat option. If a client does - // not receive a heartbeat message from a stream for more than 2 times - // the idle heartbeat setting, Fetch will return ErrNoHeartbeat. - // - // FetchBytes is non-blocking and returns MessageBatch, exposing a channel - // for delivered messages. - // - // Messages channel is always closed, thus it is safe to range over it - // without additional checks. - FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) - - // FetchNoWait is used to retrieve up to a provided number of messages - // from a stream. Unlike Fetch, FetchNoWait will only deliver messages - // that are currently available in the stream and will not wait for new - // messages to arrive, even if batch size is not met. - // - // FetchNoWait is non-blocking and returns MessageBatch, exposing a - // channel for delivered messages. 
- // - // Messages channel is always closed, thus it is safe to range over it - // without additional checks. - FetchNoWait(batch int) (MessageBatch, error) - - // Consume will continuously receive messages and handle them - // with the provided callback function. Consume can be configured using - // PullConsumeOpt options: - // - // - Error handling and monitoring can be configured using ConsumeErrHandler - // option, which provides information about errors encountered during - // consumption (both transient and terminal) - // - Consume can be configured to stop after a certain number of - // messages is received using StopAfter option. - // - Consume can be optimized for throughput or memory usage using - // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options. - // Unless there is a specific use case, these options should not be used. - // - // Consume returns a ConsumeContext, which can be used to stop or drain - // the consumer. - Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) - - // Messages returns MessagesContext, allowing continuously iterating - // over messages on a stream. Messages can be configured using - // PullMessagesOpt options: - // - // - Messages can be optimized for throughput or memory usage using - // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options. - // Unless there is a specific use case, these options should not be used. - // - WithMessagesErrOnMissingHeartbeat can be used to enable/disable - // erroring out on MessagesContext.Next when a heartbeat is missing. - // This option is enabled by default. - Messages(opts ...PullMessagesOpt) (MessagesContext, error) - - // Next is used to retrieve the next message from the consumer. This - // method will block until the message is retrieved or timeout is - // reached. - Next(opts ...FetchOpt) (Msg, error) - - // Info fetches current ConsumerInfo from the server. 
- Info(context.Context) (*ConsumerInfo, error) - - // CachedInfo returns ConsumerInfo currently cached on this consumer. - // This method does not perform any network requests. The cached - // ConsumerInfo is updated on every call to Info and Update. - CachedInfo() *ConsumerInfo - } - - createConsumerRequest struct { - Stream string `json:"stream_name"` - Config *ConsumerConfig `json:"config"` - Action string `json:"action"` - } -) - -// Info fetches current ConsumerInfo from the server. -func (p *pullConsumer) Info(ctx context.Context) (*ConsumerInfo, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - infoSubject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiConsumerInfoT, p.stream, p.name)) - var resp consumerInfoResponse - - if _, err := p.jetStream.apiRequestJSON(ctx, infoSubject, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { - return nil, ErrConsumerNotFound - } - return nil, resp.Error - } - if resp.Error == nil && resp.ConsumerInfo == nil { - return nil, ErrConsumerNotFound - } - - p.info = resp.ConsumerInfo - return resp.ConsumerInfo, nil -} - -// CachedInfo returns ConsumerInfo currently cached on this consumer. -// This method does not perform any network requests. The cached -// ConsumerInfo is updated on every call to Info and Update. 
-func (p *pullConsumer) CachedInfo() *ConsumerInfo { - return p.info -} - -func upsertConsumer(ctx context.Context, js *jetStream, stream string, cfg ConsumerConfig, action string) (Consumer, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - req := createConsumerRequest{ - Stream: stream, - Config: &cfg, - Action: action, - } - reqJSON, err := json.Marshal(req) - if err != nil { - return nil, err - } - - consumerName := cfg.Name - if consumerName == "" { - if cfg.Durable != "" { - consumerName = cfg.Durable - } else { - consumerName = generateConsName() - } - } - if err := validateConsumerName(consumerName); err != nil { - return nil, err - } - - var ccSubj string - if cfg.FilterSubject != "" && len(cfg.FilterSubjects) == 0 { - ccSubj = apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject)) - } else { - ccSubj = apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerCreateT, stream, consumerName)) - } - var resp consumerInfoResponse - - if _, err := js.apiRequestJSON(ctx, ccSubj, &resp, reqJSON); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeStreamNotFound { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - // check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo - if len(cfg.FilterSubjects) != 0 && len(resp.Config.FilterSubjects) == 0 { - return nil, ErrConsumerMultipleFilterSubjectsNotSupported - } - - return &pullConsumer{ - jetStream: js, - stream: stream, - name: resp.Name, - durable: cfg.Durable != "", - info: resp.ConsumerInfo, - subscriptions: make(map[string]*pullSubscription), - }, nil -} - -const ( - consumerActionCreate = "create" - consumerActionUpdate = "update" - consumerActionCreateOrUpdate = "" -) - -func generateConsName() string { - name := nuid.Next() - sha := sha256.New() - sha.Write([]byte(name)) - b := sha.Sum(nil) - for i := 0; i < 8; 
i++ { - b[i] = rdigits[int(b[i]%base)] - } - return string(b[:8]) -} - -func getConsumer(ctx context.Context, js *jetStream, stream, name string) (Consumer, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - if err := validateConsumerName(name); err != nil { - return nil, err - } - infoSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerInfoT, stream, name)) - - var resp consumerInfoResponse - - if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { - return nil, ErrConsumerNotFound - } - return nil, resp.Error - } - if resp.Error == nil && resp.ConsumerInfo == nil { - return nil, ErrConsumerNotFound - } - - cons := &pullConsumer{ - jetStream: js, - stream: stream, - name: name, - durable: resp.Config.Durable != "", - info: resp.ConsumerInfo, - subscriptions: make(map[string]*pullSubscription, 0), - } - - return cons, nil -} - -func deleteConsumer(ctx context.Context, js *jetStream, stream, consumer string) error { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - if err := validateConsumerName(consumer); err != nil { - return err - } - deleteSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerDeleteT, stream, consumer)) - - var resp consumerDeleteResponse - - if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil { - return err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { - return ErrConsumerNotFound - } - return resp.Error - } - return nil -} - -func validateConsumerName(dur string) error { - if strings.Contains(dur, ".") { - return fmt.Errorf("%w: %q", ErrInvalidConsumerName, dur) - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go 
b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go deleted file mode 100644 index 0ff4672..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "encoding/json" - "fmt" - "time" -) - -type ( - // ConsumerInfo is the detailed information about a JetStream consumer. - ConsumerInfo struct { - // Stream specifies the name of the stream that the consumer is bound - // to. - Stream string `json:"stream_name"` - - // Name represents the unique identifier for the consumer. This can be - // either set explicitly by the client or generated automatically if not - // set. - Name string `json:"name"` - - // Created is the timestamp when the consumer was created. - Created time.Time `json:"created"` - - // Config contains the configuration settings of the consumer, set when - // creating or updating the consumer. - Config ConsumerConfig `json:"config"` - - // Delivered holds information about the most recently delivered - // message, including its sequence numbers and timestamp. - Delivered SequenceInfo `json:"delivered"` - - // AckFloor indicates the message before the first unacknowledged - // message. 
- AckFloor SequenceInfo `json:"ack_floor"` - - // NumAckPending is the number of messages that have been delivered but - // not yet acknowledged. - NumAckPending int `json:"num_ack_pending"` - - // NumRedelivered counts the number of messages that have been - // redelivered and not yet acknowledged. Each message is counted only - // once, even if it has been redelivered multiple times. This count is - // reset when the message is eventually acknowledged. - NumRedelivered int `json:"num_redelivered"` - - // NumWaiting is the count of active pull requests. It is only relevant - // for pull-based consumers. - NumWaiting int `json:"num_waiting"` - - // NumPending is the number of messages that match the consumer's - // filter, but have not been delivered yet. - NumPending uint64 `json:"num_pending"` - - // Cluster contains information about the cluster to which this consumer - // belongs (if applicable). - Cluster *ClusterInfo `json:"cluster,omitempty"` - - // PushBound indicates whether at least one subscription exists for the - // delivery subject of this consumer. This is only applicable to - // push-based consumers. - PushBound bool `json:"push_bound,omitempty"` - - // TimeStamp indicates when the info was gathered by the server. - TimeStamp time.Time `json:"ts"` - } - - // ConsumerConfig is the configuration of a JetStream consumer. - ConsumerConfig struct { - // Name is an optional name for the consumer. If not set, one is - // generated automatically. - // - // Name cannot contain whitespace, ., *, >, path separators (forward or - // backwards slash), and non-printable characters. - Name string `json:"name,omitempty"` - - // Durable is an optional durable name for the consumer. If both Durable - // and Name are set, they have to be equal. Unless InactiveThreshold is set, a - // durable consumer will not be cleaned up automatically. 
- // - // Durable cannot contain whitespace, ., *, >, path separators (forward or - // backwards slash), and non-printable characters. - Durable string `json:"durable_name,omitempty"` - - // Description provides an optional description of the consumer. - Description string `json:"description,omitempty"` - - // DeliverPolicy defines from which point to start delivering messages - // from the stream. Defaults to DeliverAllPolicy. - DeliverPolicy DeliverPolicy `json:"deliver_policy"` - - // OptStartSeq is an optional sequence number from which to start - // message delivery. Only applicable when DeliverPolicy is set to - // DeliverByStartSequencePolicy. - OptStartSeq uint64 `json:"opt_start_seq,omitempty"` - - // OptStartTime is an optional time from which to start message - // delivery. Only applicable when DeliverPolicy is set to - // DeliverByStartTimePolicy. - OptStartTime *time.Time `json:"opt_start_time,omitempty"` - - // AckPolicy defines the acknowledgement policy for the consumer. - // Defaults to AckExplicitPolicy. - AckPolicy AckPolicy `json:"ack_policy"` - - // AckWait defines how long the server will wait for an acknowledgement - // before resending a message. If not set, server default is 30 seconds. - AckWait time.Duration `json:"ack_wait,omitempty"` - - // MaxDeliver defines the maximum number of delivery attempts for a - // message. Applies to any message that is re-sent due to ack policy. - // If not set, server default is -1 (unlimited). - MaxDeliver int `json:"max_deliver,omitempty"` - - // BackOff specifies the optional back-off intervals for retrying - // message delivery after a failed acknowledgement. It overrides - // AckWait. - // - // BackOff only applies to messages not acknowledged in specified time, - // not messages that were nack'ed. - // - // The number of intervals specified must be lower or equal to - // MaxDeliver. If the number of intervals is lower, the last interval is - // used for all remaining attempts. 
- BackOff []time.Duration `json:"backoff,omitempty"` - - // FilterSubject can be used to filter messages delivered from the - // stream. FilterSubject is exclusive with FilterSubjects. - FilterSubject string `json:"filter_subject,omitempty"` - - // ReplayPolicy defines the rate at which messages are sent to the - // consumer. If ReplayOriginalPolicy is set, messages are sent in the - // same intervals in which they were stored on stream. This can be used - // e.g. to simulate production traffic in development environments. If - // ReplayInstantPolicy is set, messages are sent as fast as possible. - // Defaults to ReplayInstantPolicy. - ReplayPolicy ReplayPolicy `json:"replay_policy"` - - // RateLimit specifies an optional maximum rate of message delivery in - // bits per second. - RateLimit uint64 `json:"rate_limit_bps,omitempty"` - - // SampleFrequency is an optional frequency for sampling how often - // acknowledgements are sampled for observability. See - // https://docs.nats.io/running-a-nats-service/nats_admin/monitoring/monitoring_jetstream - SampleFrequency string `json:"sample_freq,omitempty"` - - // MaxWaiting is a maximum number of pull requests waiting to be - // fulfilled. If not set, this will inherit settings from stream's - // ConsumerLimits or (if those are not set) from account settings. If - // neither are set, server default is 512. - MaxWaiting int `json:"max_waiting,omitempty"` - - // MaxAckPending is a maximum number of outstanding unacknowledged - // messages. Once this limit is reached, the server will suspend sending - // messages to the consumer. If not set, server default is 1000 - // seconds. Set to -1 for unlimited. - MaxAckPending int `json:"max_ack_pending,omitempty"` - - // HeadersOnly indicates whether only headers of messages should be sent - // (and no payload). Defaults to false. - HeadersOnly bool `json:"headers_only,omitempty"` - - // MaxRequestBatch is the optional maximum batch size a single pull - // request can make. 
When set with MaxRequestMaxBytes, the batch size - // will be constrained by whichever limit is hit first. - MaxRequestBatch int `json:"max_batch,omitempty"` - - // MaxRequestExpires is the maximum duration a single pull request will - // wait for messages to be available to pull. - MaxRequestExpires time.Duration `json:"max_expires,omitempty"` - - // MaxRequestMaxBytes is the optional maximum total bytes that can be - // requested in a given batch. When set with MaxRequestBatch, the batch - // size will be constrained by whichever limit is hit first. - MaxRequestMaxBytes int `json:"max_bytes,omitempty"` - - // InactiveThreshold is a duration which instructs the server to clean - // up the consumer if it has been inactive for the specified duration. - // Durable consumers will not be cleaned up by default, but if - // InactiveThreshold is set, they will be. If not set, this will inherit - // settings from stream's ConsumerLimits. If neither are set, server - // default is 5 seconds. - // - // A consumer is considered inactive there are not pull requests - // received by the server (for pull consumers), or no interest detected - // on deliver subject (for push consumers), not if there are no - // messages to be delivered. - InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` - - // Replicas the number of replicas for the consumer's state. By default, - // consumers inherit the number of replicas from the stream. - Replicas int `json:"num_replicas"` - - // MemoryStorage is a flag to force the consumer to use memory storage - // rather than inherit the storage type from the stream. - MemoryStorage bool `json:"mem_storage,omitempty"` - - // FilterSubjects allows filtering messages from a stream by subject. - // This field is exclusive with FilterSubject. Requires nats-server - // v2.10.0 or later. 
- FilterSubjects []string `json:"filter_subjects,omitempty"` - - // Metadata is a set of application-defined key-value pairs for - // associating metadata on the consumer. This feature requires - // nats-server v2.10.0 or later. - Metadata map[string]string `json:"metadata,omitempty"` - } - - // OrderedConsumerConfig is the configuration of an ordered JetStream - // consumer. For more information, see [Ordered Consumers] in README - // - // [Ordered Consumers]: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md#ordered-consumers - OrderedConsumerConfig struct { - // FilterSubjects allows filtering messages from a stream by subject. - // This field is exclusive with FilterSubject. Requires nats-server - // v2.10.0 or later. - FilterSubjects []string `json:"filter_subjects,omitempty"` - - // DeliverPolicy defines from which point to start delivering messages - // from the stream. Defaults to DeliverAllPolicy. - DeliverPolicy DeliverPolicy `json:"deliver_policy"` - - // OptStartSeq is an optional sequence number from which to start - // message delivery. Only applicable when DeliverPolicy is set to - // DeliverByStartSequencePolicy. - OptStartSeq uint64 `json:"opt_start_seq,omitempty"` - - // OptStartTime is an optional time from which to start message - // delivery. Only applicable when DeliverPolicy is set to - // DeliverByStartTimePolicy. - OptStartTime *time.Time `json:"opt_start_time,omitempty"` - - // ReplayPolicy defines the rate at which messages are sent to the - // consumer. If ReplayOriginalPolicy is set, messages are sent in the - // same intervals in which they were stored on stream. This can be used - // e.g. to simulate production traffic in development environments. If - // ReplayInstantPolicy is set, messages are sent as fast as possible. - // Defaults to ReplayInstantPolicy. 
- ReplayPolicy ReplayPolicy `json:"replay_policy"` - - // InactiveThreshold is a duration which instructs the server to clean - // up the consumer if it has been inactive for the specified duration. - // Defaults to 5s. - InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` - - // HeadersOnly indicates whether only headers of messages should be sent - // (and no payload). Defaults to false. - HeadersOnly bool `json:"headers_only,omitempty"` - - // Maximum number of attempts for the consumer to be recreated in a - // single recreation cycle. Defaults to unlimited. - MaxResetAttempts int - } - - // DeliverPolicy determines from which point to start delivering messages. - DeliverPolicy int - - // AckPolicy determines how the consumer should acknowledge delivered - // messages. - AckPolicy int - - // ReplayPolicy determines how the consumer should replay messages it - // already has queued in the stream. - ReplayPolicy int - - // SequenceInfo has both the consumer and the stream sequence and last - // activity. - SequenceInfo struct { - Consumer uint64 `json:"consumer_seq"` - Stream uint64 `json:"stream_seq"` - Last *time.Time `json:"last_active,omitempty"` - } -) - -const ( - // DeliverAllPolicy starts delivering messages from the very beginning of a - // stream. This is the default. - DeliverAllPolicy DeliverPolicy = iota - - // DeliverLastPolicy will start the consumer with the last sequence - // received. - DeliverLastPolicy - - // DeliverNewPolicy will only deliver new messages that are sent after the - // consumer is created. - DeliverNewPolicy - - // DeliverByStartSequencePolicy will deliver messages starting from a given - // sequence configured with OptStartSeq in ConsumerConfig. - DeliverByStartSequencePolicy - - // DeliverByStartTimePolicy will deliver messages starting from a given time - // configured with OptStartTime in ConsumerConfig. 
- DeliverByStartTimePolicy - - // DeliverLastPerSubjectPolicy will start the consumer with the last message - // for all subjects received. - DeliverLastPerSubjectPolicy -) - -func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("all"), jsonString("undefined"): - *p = DeliverAllPolicy - case jsonString("last"): - *p = DeliverLastPolicy - case jsonString("new"): - *p = DeliverNewPolicy - case jsonString("by_start_sequence"): - *p = DeliverByStartSequencePolicy - case jsonString("by_start_time"): - *p = DeliverByStartTimePolicy - case jsonString("last_per_subject"): - *p = DeliverLastPerSubjectPolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - - return nil -} - -func (p DeliverPolicy) MarshalJSON() ([]byte, error) { - switch p { - case DeliverAllPolicy: - return json.Marshal("all") - case DeliverLastPolicy: - return json.Marshal("last") - case DeliverNewPolicy: - return json.Marshal("new") - case DeliverByStartSequencePolicy: - return json.Marshal("by_start_sequence") - case DeliverByStartTimePolicy: - return json.Marshal("by_start_time") - case DeliverLastPerSubjectPolicy: - return json.Marshal("last_per_subject") - } - return nil, fmt.Errorf("nats: unknown deliver policy %v", p) -} - -func (p DeliverPolicy) String() string { - switch p { - case DeliverAllPolicy: - return "all" - case DeliverLastPolicy: - return "last" - case DeliverNewPolicy: - return "new" - case DeliverByStartSequencePolicy: - return "by_start_sequence" - case DeliverByStartTimePolicy: - return "by_start_time" - case DeliverLastPerSubjectPolicy: - return "last_per_subject" - } - return "" -} - -const ( - // AckExplicitPolicy requires ack or nack for all messages. - AckExplicitPolicy AckPolicy = iota - - // AckAllPolicy when acking a sequence number, this implicitly acks all - // sequences below this one as well. - AckAllPolicy - - // AckNonePolicy requires no acks for delivered messages. 
- AckNonePolicy -) - -func (p *AckPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("none"): - *p = AckNonePolicy - case jsonString("all"): - *p = AckAllPolicy - case jsonString("explicit"): - *p = AckExplicitPolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -func (p AckPolicy) MarshalJSON() ([]byte, error) { - switch p { - case AckNonePolicy: - return json.Marshal("none") - case AckAllPolicy: - return json.Marshal("all") - case AckExplicitPolicy: - return json.Marshal("explicit") - } - return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p) -} - -func (p AckPolicy) String() string { - switch p { - case AckNonePolicy: - return "AckNone" - case AckAllPolicy: - return "AckAll" - case AckExplicitPolicy: - return "AckExplicit" - } - return "Unknown AckPolicy" -} - -const ( - // ReplayInstantPolicy will replay messages as fast as possible. - ReplayInstantPolicy ReplayPolicy = iota - - // ReplayOriginalPolicy will maintain the same timing as the messages were - // received. 
- ReplayOriginalPolicy -) - -func (p *ReplayPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("instant"): - *p = ReplayInstantPolicy - case jsonString("original"): - *p = ReplayOriginalPolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -func (p ReplayPolicy) MarshalJSON() ([]byte, error) { - switch p { - case ReplayOriginalPolicy: - return json.Marshal("original") - case ReplayInstantPolicy: - return json.Marshal("instant") - } - return nil, fmt.Errorf("nats: unknown replay policy %v", p) -} - -func (p ReplayPolicy) String() string { - switch p { - case ReplayOriginalPolicy: - return "original" - case ReplayInstantPolicy: - return "instant" - } - return "" -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/errors.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/errors.go deleted file mode 100644 index 5ed5176..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/errors.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "errors" - "fmt" -) - -type ( - // JetStreamError is an error result that happens when using JetStream. - // In case of client-side error, [APIError] returns nil. 
- JetStreamError interface { - APIError() *APIError - error - } - - jsError struct { - apiErr *APIError - message string - } - - // APIError is included in all API responses if there was an error. - APIError struct { - Code int `json:"code"` - ErrorCode ErrorCode `json:"err_code"` - Description string `json:"description,omitempty"` - } - - // ErrorCode represents error_code returned in response from JetStream API. - ErrorCode uint16 -) - -const ( - JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039 - JSErrCodeJetStreamNotEnabled ErrorCode = 10076 - - JSErrCodeStreamNotFound ErrorCode = 10059 - JSErrCodeStreamNameInUse ErrorCode = 10058 - - JSErrCodeConsumerCreate ErrorCode = 10012 - JSErrCodeConsumerNotFound ErrorCode = 10014 - JSErrCodeConsumerNameExists ErrorCode = 10013 - JSErrCodeConsumerAlreadyExists ErrorCode = 10105 - JSErrCodeConsumerExists ErrorCode = 10148 - JSErrCodeDuplicateFilterSubjects ErrorCode = 10136 - JSErrCodeOverlappingFilterSubjects ErrorCode = 10138 - JSErrCodeConsumerEmptyFilter ErrorCode = 10139 - JSErrCodeConsumerDoesNotExist ErrorCode = 10149 - - JSErrCodeMessageNotFound ErrorCode = 10037 - - JSErrCodeBadRequest ErrorCode = 10003 - - JSErrCodeStreamWrongLastSequence ErrorCode = 10071 -) - -var ( - // JetStream API errors - - // ErrJetStreamNotEnabled is an error returned when JetStream is not - // enabled. - ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}} - - // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is - // not enabled for an account. - ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}} - - // ErrStreamNotFound is an error returned when stream with given name does - // not exist. 
- ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}} - - // ErrStreamNameAlreadyInUse is returned when a stream with given name - // already exists and has a different configuration. - ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}} - - // ErrStreamSubjectTransformNotSupported is returned when the connected - // nats-server version does not support setting the stream subject - // transform. If this error is returned when executing CreateStream(), the - // stream with invalid configuration was already created in the server. - ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} - - // ErrStreamSourceSubjectTransformNotSupported is returned when the - // connected nats-server version does not support setting the stream source - // subject transform. If this error is returned when executing - // CreateStream(), the stream with invalid configuration was already created - // in the server. - ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} - - // ErrStreamSourceNotSupported is returned when the connected nats-server - // version does not support setting the stream sources. If this error is - // returned when executing CreateStream(), the stream with invalid - // configuration was already created in the server. - ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"} - - // ErrStreamSourceMultipleFilterSubjectsNotSupported is returned when the - // connected nats-server version does not support setting the stream - // sources. 
If this error is returned when executing CreateStream(), the - // stream with invalid configuration was already created in the server. - ErrStreamSourceMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject filters not supported by nats-server"} - - // ErrConsumerNotFound is an error returned when consumer with given name - // does not exist. - ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}} - - // ErrConsumerExists is returned when attempting to create a consumer with - // CreateConsumer but a consumer with given name already exists. - ErrConsumerExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerExists, Description: "consumer already exists", Code: 400}} - - // ErrConsumerNameExists is returned when attempting to update a consumer - // with UpdateConsumer but a consumer with given name does not exist. - ErrConsumerDoesNotExist JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerDoesNotExist, Description: "consumer does not exist", Code: 400}} - - // ErrMsgNotFound is returned when message with provided sequence number - // does not exist. - ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}} - - // ErrBadRequest is returned when invalid request is sent to JetStream API. - ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}} - - // ErrConsumerCreate is returned when nats-server reports error when - // creating consumer (e.g. illegal update). 
- ErrConsumerCreate JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerCreate, Description: "could not create consumer", Code: 500}} - - // ErrDuplicateFilterSubjects is returned when both FilterSubject and - // FilterSubjects are specified when creating consumer. - ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}} - - // ErrDuplicateFilterSubjects is returned when filter subjects overlap when - // creating consumer. - ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}} - - // ErrEmptyFilter is returned when a filter in FilterSubjects is empty. - ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}} - - // Client errors - - // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the - // connected nats-server version does not support setting multiple filter - // subjects with filter_subjects field. If this error is returned when - // executing AddConsumer(), the consumer with invalid configuration was - // already created in the server. - ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"} - - // ErrConsumerNotFound is an error returned when consumer with given name - // does not exist. - ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"} - - // ErrInvalidJSAck is returned when JetStream ack from message publish is - // invalid. 
- ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"} - - // ErrStreamNameRequired is returned when the provided stream name is empty. - ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"} - - // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more - // than once. - ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"} - - // ErrNoStreamResponse is returned when there is no response from stream - // (e.g. no responders error). - ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"} - - // ErrNotJSMessage is returned when attempting to get metadata from non - // JetStream message. - ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"} - - // ErrInvalidStreamName is returned when the provided stream name is invalid - // (contains '.'). - ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"} - - // ErrInvalidSubject is returned when the provided subject name is invalid. - ErrInvalidSubject JetStreamError = &jsError{message: "invalid subject name"} - - // ErrInvalidConsumerName is returned when the provided consumer name is - // invalid (contains '.'). - ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"} - - // ErrNoMessages is returned when no messages are currently available for a - // consumer. - ErrNoMessages JetStreamError = &jsError{message: "no messages"} - - // ErrMaxBytesExceeded is returned when a message would exceed MaxBytes set - // on a pull request. - ErrMaxBytesExceeded JetStreamError = &jsError{message: "message size exceeds max bytes"} - - // ErrConsumerDeleted is returned when attempting to send pull request to a - // consumer which does not exist. 
- ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"} - - // ErrConsumerLeadershipChanged is returned when pending requests are no - // longer valid after leadership has changed. - ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "leadership change"} - - // ErrHandlerRequired is returned when no handler func is provided in - // Stream(). - ErrHandlerRequired JetStreamError = &jsError{message: "handler cannot be empty"} - - // ErrEndOfData is returned when iterating over paged API from JetStream - // reaches end of data. - ErrEndOfData JetStreamError = &jsError{message: "end of data reached"} - - // ErrNoHeartbeat is received when no message is received in IdleHeartbeat - // time (if set). - ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"} - - // ErrConsumerHasActiveSubscription is returned when a consumer is already - // subscribed to a stream. - ErrConsumerHasActiveSubscription JetStreamError = &jsError{message: "consumer has active subscription"} - - // ErrMsgNotBound is returned when given message is not bound to any - // subscription. - ErrMsgNotBound JetStreamError = &jsError{message: "message is not bound to subscription/connection"} - - // ErrMsgNoReply is returned when attempting to reply to a message without a - // reply subject. - ErrMsgNoReply JetStreamError = &jsError{message: "message does not have a reply"} - - // ErrMsgDeleteUnsuccessful is returned when an attempt to delete a message - // is unsuccessful. - ErrMsgDeleteUnsuccessful JetStreamError = &jsError{message: "message deletion unsuccessful"} - - // ErrAsyncPublishReplySubjectSet is returned when reply subject is set on - // async message publish. - ErrAsyncPublishReplySubjectSet JetStreamError = &jsError{message: "reply subject should be empty"} - - // ErrTooManyStalledMsgs is returned when too many outstanding async - // messages are waiting for ack. 
- ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"} - - // ErrInvalidOption is returned when there is a collision between options. - ErrInvalidOption JetStreamError = &jsError{message: "invalid jetstream option"} - - // ErrMsgIteratorClosed is returned when attempting to get message from a - // closed iterator. - ErrMsgIteratorClosed JetStreamError = &jsError{message: "messages iterator closed"} - - // ErrOrderedConsumerReset is returned when resetting ordered consumer fails - // due to too many attempts. - ErrOrderedConsumerReset JetStreamError = &jsError{message: "recreating ordered consumer"} - - // ErrOrderConsumerUsedAsFetch is returned when ordered consumer was already - // used to process messages using Fetch (or FetchBytes). - ErrOrderConsumerUsedAsFetch JetStreamError = &jsError{message: "ordered consumer initialized as fetch"} - - // ErrOrderConsumerUsedAsConsume is returned when ordered consumer was - // already used to process messages using Consume or Messages. - ErrOrderConsumerUsedAsConsume JetStreamError = &jsError{message: "ordered consumer initialized as consume"} - - // ErrOrderedConsumerConcurrentRequests is returned when attempting to run - // concurrent operations on ordered consumers. - ErrOrderedConsumerConcurrentRequests JetStreamError = &jsError{message: "cannot run concurrent processing using ordered consumer"} - - // ErrOrderedConsumerNotCreated is returned when trying to get consumer info - // of an ordered consumer which was not yet created. - ErrOrderedConsumerNotCreated JetStreamError = &jsError{message: "consumer instance not yet created"} - - // KeyValue Errors - - // ErrKeyExists is returned when attempting to create a key that already - // exists. 
- ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"} - - // ErrKeyValueConfigRequired is returned when attempting to create a bucket - // without a config. - ErrKeyValueConfigRequired JetStreamError = &jsError{message: "config required"} - - // ErrInvalidBucketName is returned when attempting to create a bucket with - // an invalid name. - ErrInvalidBucketName JetStreamError = &jsError{message: "invalid bucket name"} - - // ErrInvalidKey is returned when attempting to create a key with an invalid - // name. - ErrInvalidKey JetStreamError = &jsError{message: "invalid key"} - - // ErrBucketExists is returned when attempting to create a bucket that - // already exists and has a different configuration. - ErrBucketExists JetStreamError = &jsError{message: "bucket name already in use"} - - // ErrBucketNotFound is returned when attempting to access a bucket that - // does not exist. - ErrBucketNotFound JetStreamError = &jsError{message: "bucket not found"} - - // ErrBadBucket is returned when attempting to access a bucket that is not a - // key-value store. - ErrBadBucket JetStreamError = &jsError{message: "bucket not valid key-value store"} - - // ErrKeyNotFound is returned when attempting to access a key that does not - // exist. - ErrKeyNotFound JetStreamError = &jsError{message: "key not found"} - - // ErrKeyDeleted is returned when attempting to access a key that was - // deleted. - ErrKeyDeleted JetStreamError = &jsError{message: "key was deleted"} - - // ErrHistoryToLarge is returned when provided history limit is larger than - // 64. - ErrHistoryTooLarge JetStreamError = &jsError{message: "history limited to a max of 64"} - - // ErrNoKeysFound is returned when no keys are found. - ErrNoKeysFound JetStreamError = &jsError{message: "no keys found"} - - // ErrObjectConfigRequired is returned when attempting to create an object - // without a config. 
- ErrObjectConfigRequired JetStreamError = &jsError{message: "object-store config required"} - - // ErrBadObjectMeta is returned when the meta information of an object is - // invalid. - ErrBadObjectMeta JetStreamError = &jsError{message: "object-store meta information invalid"} - - // ErrObjectNotFound is returned when an object is not found. - ErrObjectNotFound JetStreamError = &jsError{message: "object not found"} - - // ErrInvalidStoreName is returned when the name of an object-store is - // invalid. - ErrInvalidStoreName JetStreamError = &jsError{message: "invalid object-store name"} - - // ErrDigestMismatch is returned when the digests of an object do not match. - ErrDigestMismatch JetStreamError = &jsError{message: "received a corrupt object, digests do not match"} - - // ErrInvalidDigestFormat is returned when the digest hash of an object has - // an invalid format. - ErrInvalidDigestFormat JetStreamError = &jsError{message: "object digest hash has invalid format"} - - // ErrNoObjectsFound is returned when no objects are found. - ErrNoObjectsFound JetStreamError = &jsError{message: "no objects found"} - - // ErrObjectAlreadyExists is returned when an object with the same name - // already exists. - ErrObjectAlreadyExists JetStreamError = &jsError{message: "an object already exists with that name"} - - // ErrNameRequired is returned when a name is required. - ErrNameRequired JetStreamError = &jsError{message: "name is required"} - - // ErrLinkNotAllowed is returned when a link cannot be set when putting the - // object in a bucket. - ErrLinkNotAllowed JetStreamError = &jsError{message: "link cannot be set when putting the object in bucket"} - - // ErrObjectRequired is returned when an object is required. - ErrObjectRequired = &jsError{message: "object required"} - - // ErrNoLinkToDeleted is returned when it is not allowed to link to a - // deleted object. 
- ErrNoLinkToDeleted JetStreamError = &jsError{message: "not allowed to link to a deleted object"} - - // ErrNoLinkToLink is returned when it is not allowed to link to another - // link. - ErrNoLinkToLink JetStreamError = &jsError{message: "not allowed to link to another link"} - - // ErrCantGetBucket is returned when an invalid Get is attempted on an - // object that is a link to a bucket. - ErrCantGetBucket JetStreamError = &jsError{message: "invalid Get, object is a link to a bucket"} - - // ErrBucketRequired is returned when a bucket is required. - ErrBucketRequired JetStreamError = &jsError{message: "bucket required"} - - // ErrBucketMalformed is returned when a bucket is malformed. - ErrBucketMalformed JetStreamError = &jsError{message: "bucket malformed"} - - // ErrUpdateMetaDeleted is returned when the meta information of a deleted - // object cannot be updated. - ErrUpdateMetaDeleted JetStreamError = &jsError{message: "cannot update meta for a deleted object"} -) - -// Error prints the JetStream API error code and description. -func (e *APIError) Error() string { - return fmt.Sprintf("nats: API error: code=%d err_code=%d description=%s", e.Code, e.ErrorCode, e.Description) -} - -// APIError implements the JetStreamError interface. -func (e *APIError) APIError() *APIError { - return e -} - -// Is matches against an APIError. -func (e *APIError) Is(err error) bool { - if e == nil { - return false - } - // Extract internal APIError to match against. - var aerr *APIError - ok := errors.As(err, &aerr) - if !ok { - return ok - } - return e.ErrorCode == aerr.ErrorCode -} - -func (err *jsError) APIError() *APIError { - return err.apiErr -} - -func (err *jsError) Error() string { - if err.apiErr != nil && err.apiErr.Description != "" { - return err.apiErr.Error() - } - return fmt.Sprintf("nats: %s", err.message) -} - -func (err *jsError) Unwrap() error { - // Allow matching to embedded APIError in case there is one. 
- if err.apiErr == nil { - return nil - } - return err.apiErr -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go deleted file mode 100644 index 19fb2d8..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go +++ /dev/null @@ -1,1048 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nuid" -) - -type ( - - // JetStream is the top-level interface for interacting with JetStream. - // The capabilities of JetStream include: - // - // - Publishing messages to a stream using [Publisher]. - // - Managing streams using [StreamManager]. - // - Managing consumers using [StreamConsumerManager]. Those are the same - // methods as on [Stream], but are available as a shortcut to a consumer - // bypassing stream lookup. - // - Managing KeyValue stores using [KeyValueManager]. - // - Managing Object Stores using [ObjectStoreManager]. - // - // JetStream can be created using [New], [NewWithAPIPrefix] or - // [NewWithDomain] methods. 
- JetStream interface { - // AccountInfo fetches account information from the server, containing details - // about the account associated with this JetStream connection. If account is - // not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. If - // the server does not have JetStream enabled, ErrJetStreamNotEnabled is - // returned. - AccountInfo(ctx context.Context) (*AccountInfo, error) - - StreamConsumerManager - StreamManager - Publisher - KeyValueManager - ObjectStoreManager - } - - // Publisher provides methods for publishing messages to a stream. - // It is available as a part of [JetStream] interface. - // The behavior of Publisher can be customized using [PublishOpt] options. - Publisher interface { - // Publish performs a synchronous publish to a stream and waits for ack - // from server. It accepts subject name (which must be bound to a stream) - // and message payload. - Publish(ctx context.Context, subject string, payload []byte, opts ...PublishOpt) (*PubAck, error) - - // PublishMsg performs a synchronous publish to a stream and waits for - // ack from server. It accepts subject name (which must be bound to a - // stream) and nats.Message. - PublishMsg(ctx context.Context, msg *nats.Msg, opts ...PublishOpt) (*PubAck, error) - - // PublishAsync performs a publish to a stream and returns - // [PubAckFuture] interface, not blocking while waiting for an - // acknowledgement. It accepts subject name (which must be bound to a - // stream) and message payload. - // - // PublishAsync does not guarantee that the message has been - // received by the server. It only guarantees that the message has been - // sent to the server and thus messages can be stored in the stream - // out of order in case of retries. 
- PublishAsync(subject string, payload []byte, opts ...PublishOpt) (PubAckFuture, error) - - // PublishMsgAsync performs a publish to a stream and returns - // [PubAckFuture] interface, not blocking while waiting for an - // acknowledgement. It accepts subject name (which must - // be bound to a stream) and nats.Message. - // - // PublishMsgAsync does not guarantee that the message has been - // sent to the server and thus messages can be stored in the stream - // received by the server. It only guarantees that the message has been - // out of order in case of retries. - PublishMsgAsync(msg *nats.Msg, opts ...PublishOpt) (PubAckFuture, error) - - // PublishAsyncPending returns the number of async publishes outstanding - // for this context. An outstanding publish is one that has been - // sent by the publisher but has not yet received an ack. - PublishAsyncPending() int - - // PublishAsyncComplete returns a channel that will be closed when all - // outstanding asynchronously published messages are acknowledged by the - // server. - PublishAsyncComplete() <-chan struct{} - } - - // StreamManager provides CRUD API for managing streams. It is available as - // a part of [JetStream] interface. CreateStream, UpdateStream, - // CreateOrUpdateStream and Stream methods return a [Stream] interface, allowing - // to operate on a stream. - StreamManager interface { - // CreateStream creates a new stream with given config and returns an - // interface to operate on it. If stream with given name already exists, - // ErrStreamNameAlreadyInUse is returned. - CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error) - - // UpdateStream updates an existing stream. If stream does not exist, - // ErrStreamNotFound is returned. - UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) - - // CreateOrUpdateStream creates a stream with given config. If stream - // already exists, it will be updated (if possible). 
- CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) - - // Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name. - // If stream does not exist, ErrStreamNotFound is returned. - Stream(ctx context.Context, stream string) (Stream, error) - - // StreamNameBySubject returns a stream name stream listening on given - // subject. If no stream is bound to given subject, ErrStreamNotFound - // is returned. - StreamNameBySubject(ctx context.Context, subject string) (string, error) - - // DeleteStream removes a stream with given name. If stream does not - // exist, ErrStreamNotFound is returned. - DeleteStream(ctx context.Context, stream string) error - - // ListStreams returns StreamInfoLister, enabling iterating over a - // channel of stream infos. - ListStreams(context.Context, ...StreamListOpt) StreamInfoLister - - // StreamNames returns a StreamNameLister, enabling iterating over a - // channel of stream names. - StreamNames(context.Context, ...StreamListOpt) StreamNameLister - } - - // StreamConsumerManager provides CRUD API for managing consumers. It is - // available as a part of [JetStream] interface. This is an alternative to - // [Stream] interface, allowing to bypass stream lookup. CreateConsumer, - // UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a - // [Consumer] interface, allowing to operate on a consumer (e.g. consume - // messages). - StreamConsumerManager interface { - // CreateOrUpdateConsumer creates a consumer on a given stream with - // given config. If consumer already exists, it will be updated (if - // possible). Consumer interface is returned, allowing to operate on a - // consumer (e.g. fetch messages). - CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) - - // CreateConsumer creates a consumer on a given stream with given - // config. 
If consumer already exists and the provided configuration - // differs from its configuration, ErrConsumerExists is returned. If the - // provided configuration is the same as the existing consumer, the - // existing consumer is returned. Consumer interface is returned, - // allowing to operate on a consumer (e.g. fetch messages). - CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) - - // UpdateConsumer updates an existing consumer. If consumer does not - // exist, ErrConsumerDoesNotExist is returned. Consumer interface is - // returned, allowing to operate on a consumer (e.g. fetch messages). - UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) - - // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer - // are managed by the library and provide a simple way to consume - // messages from a stream. Ordered consumers are ephemeral in-memory - // pull consumers and are resilient to deletes and restarts. - OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error) - - // Consumer returns an interface to an existing consumer, allowing processing - // of messages. If consumer does not exist, ErrConsumerNotFound is - // returned. - Consumer(ctx context.Context, stream string, consumer string) (Consumer, error) - - // DeleteConsumer removes a consumer with given name from a stream. - // If consumer does not exist, ErrConsumerNotFound is returned. - DeleteConsumer(ctx context.Context, stream string, consumer string) error - } - - // StreamListOpt is a functional option for [StreamManager.ListStreams] and - // [StreamManager.StreamNames] methods. - StreamListOpt func(*streamsRequest) error - - // AccountInfo contains information about the JetStream usage from the - // current account. - AccountInfo struct { - // Tier is the current account usage tier. - Tier - - // Domain is the domain name associated with this account. 
- Domain string `json:"domain"` - - // API is the API usage statistics for this account. - API APIStats `json:"api"` - - // Tiers is the list of available tiers for this account. - Tiers map[string]Tier `json:"tiers"` - } - - // Tier represents a JetStream account usage tier. - Tier struct { - // Memory is the memory storage being used for Stream Message storage. - Memory uint64 `json:"memory"` - - // Store is the disk storage being used for Stream Message storage. - Store uint64 `json:"storage"` - - // ReservedMemory is the number of bytes reserved for memory usage by - // this account on the server - ReservedMemory uint64 `json:"reserved_memory"` - - // ReservedStore is the number of bytes reserved for disk usage by this - // account on the server - ReservedStore uint64 `json:"reserved_storage"` - - // Streams is the number of streams currently defined for this account. - Streams int `json:"streams"` - - // Consumers is the number of consumers currently defined for this - // account. - Consumers int `json:"consumers"` - - // Limits are the JetStream limits for this account. - Limits AccountLimits `json:"limits"` - } - - // APIStats reports on API calls to JetStream for this account. - APIStats struct { - // Total is the total number of API calls. - Total uint64 `json:"total"` - - // Errors is the total number of API errors. - Errors uint64 `json:"errors"` - } - - // AccountLimits includes the JetStream limits of the current account. - AccountLimits struct { - // MaxMemory is the maximum amount of memory available for this account. - MaxMemory int64 `json:"max_memory"` - - // MaxStore is the maximum amount of disk storage available for this - // account. - MaxStore int64 `json:"max_storage"` - - // MaxStreams is the maximum number of streams allowed for this account. - MaxStreams int `json:"max_streams"` - - // MaxConsumers is the maximum number of consumers allowed for this - // account. 
- MaxConsumers int `json:"max_consumers"` - } - - jetStream struct { - conn *nats.Conn - jsOpts - - publisher *jetStreamClient - } - - // JetStreamOpt is a functional option for [New], [NewWithAPIPrefix] and - // [NewWithDomain] methods. - JetStreamOpt func(*jsOpts) error - - jsOpts struct { - publisherOpts asyncPublisherOpts - apiPrefix string - replyPrefix string - replyPrefixLen int - clientTrace *ClientTrace - } - - // ClientTrace can be used to trace API interactions for [JetStream]. - ClientTrace struct { - // RequestSent is called when an API request is sent to the server. - RequestSent func(subj string, payload []byte) - - // ResponseReceived is called when a response is received from the - // server. - ResponseReceived func(subj string, payload []byte, hdr nats.Header) - } - streamInfoResponse struct { - apiResponse - apiPaged - *StreamInfo - } - - accountInfoResponse struct { - apiResponse - AccountInfo - } - - streamDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` - } - - // StreamInfoLister is used to iterate over a channel of stream infos. - // Err method can be used to check for errors encountered during iteration. - // Info channel is always closed and therefore can be used in a range loop. - StreamInfoLister interface { - Info() <-chan *StreamInfo - Err() error - } - - // StreamNameLister is used to iterate over a channel of stream names. - // Err method can be used to check for errors encountered during iteration. - // Name channel is always closed and therefore can be used in a range loop. 
- StreamNameLister interface { - Name() <-chan string - Err() error - } - - apiPagedRequest struct { - Offset int `json:"offset"` - } - - streamLister struct { - js *jetStream - offset int - pageInfo *apiPaged - - streams chan *StreamInfo - names chan string - err error - } - - streamListResponse struct { - apiResponse - apiPaged - Streams []*StreamInfo `json:"streams"` - } - - streamNamesResponse struct { - apiResponse - apiPaged - Streams []string `json:"streams"` - } - - streamsRequest struct { - apiPagedRequest - Subject string `json:"subject,omitempty"` - } -) - -// defaultAPITimeout is used if context.Background() or context.TODO() is passed to API calls. -const defaultAPITimeout = 5 * time.Second - -var subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`) - -// New returns a new JetStream instance. -// It uses default API prefix ($JS.API) for JetStream API requests. -// If a custom API prefix is required, use [NewWithAPIPrefix] or [NewWithDomain]. -// -// Available options: -// - [WithClientTrace] - enables request/response tracing. -// - [WithPublishAsyncErrHandler] - sets error handler for async message publish. -// - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes -// that can be inflight at one time. -func New(nc *nats.Conn, opts ...JetStreamOpt) (JetStream, error) { - jsOpts := jsOpts{ - apiPrefix: DefaultAPIPrefix, - publisherOpts: asyncPublisherOpts{ - maxpa: defaultAsyncPubAckInflight, - }, - } - setReplyPrefix(nc, &jsOpts) - for _, opt := range opts { - if err := opt(&jsOpts); err != nil { - return nil, err - } - } - js := &jetStream{ - conn: nc, - jsOpts: jsOpts, - publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts}, - } - - return js, nil -} - -const ( - // defaultAsyncPubAckInflight is the number of async pub acks inflight. 
- defaultAsyncPubAckInflight = 4000 -) - -func setReplyPrefix(nc *nats.Conn, jsOpts *jsOpts) { - jsOpts.replyPrefix = nats.InboxPrefix - if nc.Opts.InboxPrefix != "" { - jsOpts.replyPrefix = nc.Opts.InboxPrefix + "." - } - // Add 1 for the dot separator. - jsOpts.replyPrefixLen = len(jsOpts.replyPrefix) + aReplyTokensize + 1 - -} - -// NewWithAPIPrefix returns a new JetStream instance and sets the API prefix to be used in requests to JetStream API. -// The API prefix will be used in API requests to JetStream, e.g. .STREAM.INFO.. -// -// Available options: -// - [WithClientTrace] - enables request/response tracing. -// - [WithPublishAsyncErrHandler] - sets error handler for async message publish. -// - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes -// that can be inflight at one time. -func NewWithAPIPrefix(nc *nats.Conn, apiPrefix string, opts ...JetStreamOpt) (JetStream, error) { - jsOpts := jsOpts{ - publisherOpts: asyncPublisherOpts{ - maxpa: defaultAsyncPubAckInflight, - }, - } - setReplyPrefix(nc, &jsOpts) - for _, opt := range opts { - if err := opt(&jsOpts); err != nil { - return nil, err - } - } - if apiPrefix == "" { - return nil, fmt.Errorf("API prefix cannot be empty") - } - if !strings.HasSuffix(apiPrefix, ".") { - jsOpts.apiPrefix = fmt.Sprintf("%s.", apiPrefix) - } - js := &jetStream{ - conn: nc, - jsOpts: jsOpts, - publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts}, - } - return js, nil -} - -// NewWithDomain returns a new JetStream instance and sets the domain name token used when sending JetStream requests. -// The domain name token will be used in API requests to JetStream, e.g. $JS..API.STREAM.INFO.. -// -// Available options: -// - [WithClientTrace] - enables request/response tracing. -// - [WithPublishAsyncErrHandler] - sets error handler for async message publish. -// - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes -// that can be inflight at one time. 
-func NewWithDomain(nc *nats.Conn, domain string, opts ...JetStreamOpt) (JetStream, error) { - jsOpts := jsOpts{ - publisherOpts: asyncPublisherOpts{ - maxpa: defaultAsyncPubAckInflight, - }, - } - setReplyPrefix(nc, &jsOpts) - for _, opt := range opts { - if err := opt(&jsOpts); err != nil { - return nil, err - } - } - if domain == "" { - return nil, errors.New("domain cannot be empty") - } - jsOpts.apiPrefix = fmt.Sprintf(jsDomainT, domain) - js := &jetStream{ - conn: nc, - jsOpts: jsOpts, - publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts}, - } - return js, nil -} - -// CreateStream creates a new stream with given config and returns an -// interface to operate on it. If stream with given name already exists, -// ErrStreamNameAlreadyInUse is returned. -func (js *jetStream) CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { - if err := validateStreamName(cfg.Name); err != nil { - return nil, err - } - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - ncfg := cfg - // If we have a mirror and an external domain, convert to ext.APIPrefix. - if ncfg.Mirror != nil && ncfg.Mirror.Domain != "" { - // Copy so we do not change the caller's version. - ncfg.Mirror = ncfg.Mirror.copy() - if err := ncfg.Mirror.convertDomain(); err != nil { - return nil, err - } - } - - // Check sources for the same. - if len(ncfg.Sources) > 0 { - ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...) 
- for i, ss := range ncfg.Sources { - if ss.Domain != "" { - ncfg.Sources[i] = ss.copy() - if err := ncfg.Sources[i].convertDomain(); err != nil { - return nil, err - } - } - } - } - - req, err := json.Marshal(ncfg) - if err != nil { - return nil, err - } - - createSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamCreateT, cfg.Name)) - var resp streamInfoResponse - - if _, err = js.apiRequestJSON(ctx, createSubject, &resp, req); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeStreamNameInUse { - return nil, ErrStreamNameAlreadyInUse - } - return nil, resp.Error - } - - // check that input subject transform (if used) is reflected in the returned StreamInfo - if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { - return nil, ErrStreamSubjectTransformNotSupported - } - - if len(cfg.Sources) != 0 { - if len(cfg.Sources) != len(resp.Config.Sources) { - return nil, ErrStreamSourceNotSupported - } - for i := range cfg.Sources { - if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { - return nil, ErrStreamSourceMultipleFilterSubjectsNotSupported - } - } - } - - return &stream{ - jetStream: js, - name: cfg.Name, - info: resp.StreamInfo, - }, nil -} - -// If we have a Domain, convert to the appropriate ext.APIPrefix. -// This will change the stream source, so should be a copy passed in. -func (ss *StreamSource) convertDomain() error { - if ss.Domain == "" { - return nil - } - if ss.External != nil { - return errors.New("nats: domain and external are both set") - } - ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)} - return nil -} - -// Helper for copying when we do not want to change user's version. 
-func (ss *StreamSource) copy() *StreamSource { - nss := *ss - // Check pointers - if ss.OptStartTime != nil { - t := *ss.OptStartTime - nss.OptStartTime = &t - } - if ss.External != nil { - ext := *ss.External - nss.External = &ext - } - return &nss -} - -// UpdateStream updates an existing stream. If stream does not exist, -// ErrStreamNotFound is returned. -func (js *jetStream) UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { - if err := validateStreamName(cfg.Name); err != nil { - return nil, err - } - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - - req, err := json.Marshal(cfg) - if err != nil { - return nil, err - } - - updateSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamUpdateT, cfg.Name)) - var resp streamInfoResponse - - if _, err = js.apiRequestJSON(ctx, updateSubject, &resp, req); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeStreamNotFound { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - // check that input subject transform (if used) is reflected in the returned StreamInfo - if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { - return nil, ErrStreamSubjectTransformNotSupported - } - - if len(cfg.Sources) != 0 { - if len(cfg.Sources) != len(resp.Config.Sources) { - return nil, ErrStreamSourceNotSupported - } - for i := range cfg.Sources { - if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { - return nil, ErrStreamSourceMultipleFilterSubjectsNotSupported - } - } - } - - return &stream{ - jetStream: js, - name: cfg.Name, - info: resp.StreamInfo, - }, nil -} - -// CreateOrUpdateStream creates a stream with given config. If stream -// already exists, it will be updated (if possible). 
-func (js *jetStream) CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { - s, err := js.UpdateStream(ctx, cfg) - if err != nil { - if !errors.Is(err, ErrStreamNotFound) { - return nil, err - } - return js.CreateStream(ctx, cfg) - } - - return s, nil -} - -// Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name. -// If stream does not exist, ErrStreamNotFound is returned. -func (js *jetStream) Stream(ctx context.Context, name string) (Stream, error) { - if err := validateStreamName(name); err != nil { - return nil, err - } - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - infoSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamInfoT, name)) - - var resp streamInfoResponse - - if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeStreamNotFound { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - return &stream{ - jetStream: js, - name: name, - info: resp.StreamInfo, - }, nil -} - -// DeleteStream removes a stream with given name -func (js *jetStream) DeleteStream(ctx context.Context, name string) error { - if err := validateStreamName(name); err != nil { - return err - } - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - deleteSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamDeleteT, name)) - var resp streamDeleteResponse - - if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil { - return err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeStreamNotFound { - return ErrStreamNotFound - } - return resp.Error - } - return nil -} - -// CreateOrUpdateConsumer creates a consumer on a given stream with -// given config. If consumer already exists, it will be updated (if -// possible). Consumer interface is returned, allowing to operate on a -// consumer (e.g. 
fetch messages). -func (js *jetStream) CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { - if err := validateStreamName(stream); err != nil { - return nil, err - } - return upsertConsumer(ctx, js, stream, cfg, consumerActionCreateOrUpdate) -} - -// CreateConsumer creates a consumer on a given stream with given -// config. If consumer already exists and the provided configuration -// differs from its configuration, ErrConsumerExists is returned. If the -// provided configuration is the same as the existing consumer, the -// existing consumer is returned. Consumer interface is returned, -// allowing to operate on a consumer (e.g. fetch messages). -func (js *jetStream) CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { - if err := validateStreamName(stream); err != nil { - return nil, err - } - return upsertConsumer(ctx, js, stream, cfg, consumerActionCreate) -} - -// UpdateConsumer updates an existing consumer. If consumer does not -// exist, ErrConsumerDoesNotExist is returned. Consumer interface is -// returned, allowing to operate on a consumer (e.g. fetch messages). -func (js *jetStream) UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { - if err := validateStreamName(stream); err != nil { - return nil, err - } - return upsertConsumer(ctx, js, stream, cfg, consumerActionUpdate) -} - -// OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer -// are managed by the library and provide a simple way to consume -// messages from a stream. Ordered consumers are ephemeral in-memory -// pull consumers and are resilient to deletes and restarts. 
-func (js *jetStream) OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error) { - if err := validateStreamName(stream); err != nil { - return nil, err - } - oc := &orderedConsumer{ - jetStream: js, - cfg: &cfg, - stream: stream, - namePrefix: nuid.Next(), - doReset: make(chan struct{}, 1), - } - if cfg.OptStartSeq != 0 { - oc.cursor.streamSeq = cfg.OptStartSeq - 1 - } - err := oc.reset() - if err != nil { - return nil, err - } - - return oc, nil -} - -// Consumer returns an interface to an existing consumer, allowing processing -// of messages. If consumer does not exist, ErrConsumerNotFound is -// returned. -func (js *jetStream) Consumer(ctx context.Context, stream string, name string) (Consumer, error) { - if err := validateStreamName(stream); err != nil { - return nil, err - } - return getConsumer(ctx, js, stream, name) -} - -// DeleteConsumer removes a consumer with given name from a stream. -// If consumer does not exist, ErrConsumerNotFound is returned. -func (js *jetStream) DeleteConsumer(ctx context.Context, stream string, name string) error { - if err := validateStreamName(stream); err != nil { - return err - } - return deleteConsumer(ctx, js, stream, name) -} - -func validateStreamName(stream string) error { - if stream == "" { - return ErrStreamNameRequired - } - if strings.Contains(stream, ".") { - return fmt.Errorf("%w: '%s'", ErrInvalidStreamName, stream) - } - return nil -} - -func validateSubject(subject string) error { - if subject == "" { - return fmt.Errorf("%w: %s", ErrInvalidSubject, "subject cannot be empty") - } - if !subjectRegexp.MatchString(subject) { - return fmt.Errorf("%w: %s", ErrInvalidSubject, subject) - } - return nil -} - -// AccountInfo fetches account information from the server, containing details -// about the account associated with this JetStream connection. If account is -// not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. 
If -// the server does not have JetStream enabled, ErrJetStreamNotEnabled is -// returned. -func (js *jetStream) AccountInfo(ctx context.Context) (*AccountInfo, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - var resp accountInfoResponse - - infoSubject := apiSubj(js.apiPrefix, apiAccountInfo) - if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { - if errors.Is(err, nats.ErrNoResponders) { - return nil, ErrJetStreamNotEnabled - } - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabledForAccount { - return nil, ErrJetStreamNotEnabledForAccount - } - if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabled { - return nil, ErrJetStreamNotEnabled - } - return nil, resp.Error - } - - return &resp.AccountInfo, nil -} - -// ListStreams returns StreamInfoLister, enabling iterating over a -// channel of stream infos. -func (js *jetStream) ListStreams(ctx context.Context, opts ...StreamListOpt) StreamInfoLister { - l := &streamLister{ - js: js, - streams: make(chan *StreamInfo), - } - var streamsReq streamsRequest - for _, opt := range opts { - if err := opt(&streamsReq); err != nil { - l.err = err - close(l.streams) - return l - } - } - go func() { - defer close(l.streams) - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - for { - page, err := l.streamInfos(ctx, streamsReq) - if err != nil && !errors.Is(err, ErrEndOfData) { - l.err = err - return - } - for _, info := range page { - select { - case l.streams <- info: - case <-ctx.Done(): - l.err = ctx.Err() - return - } - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - - return l -} - -// Info returns a channel allowing retrieval of stream infos returned by [ListStreams] -func (s *streamLister) Info() <-chan *StreamInfo { - return s.streams -} - -// Err returns an error channel which will be populated with error from [ListStreams] or [StreamNames] 
request -func (s *streamLister) Err() error { - return s.err -} - -// StreamNames returns a StreamNameLister, enabling iterating over a -// channel of stream names. -func (js *jetStream) StreamNames(ctx context.Context, opts ...StreamListOpt) StreamNameLister { - l := &streamLister{ - js: js, - names: make(chan string), - } - var streamsReq streamsRequest - for _, opt := range opts { - if err := opt(&streamsReq); err != nil { - l.err = err - close(l.streams) - return l - } - } - go func() { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - defer close(l.names) - for { - page, err := l.streamNames(ctx, streamsReq) - if err != nil && !errors.Is(err, ErrEndOfData) { - l.err = err - return - } - for _, info := range page { - select { - case l.names <- info: - case <-ctx.Done(): - l.err = ctx.Err() - return - } - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - - return l -} - -// StreamNameBySubject returns a stream name stream listening on given -// subject. If no stream is bound to given subject, ErrStreamNotFound -// is returned. 
-func (js *jetStream) StreamNameBySubject(ctx context.Context, subject string) (string, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - if err := validateSubject(subject); err != nil { - return "", err - } - streamsSubject := apiSubj(js.apiPrefix, apiStreams) - - r := &streamsRequest{Subject: subject} - req, err := json.Marshal(r) - if err != nil { - return "", err - } - var resp streamNamesResponse - _, err = js.apiRequestJSON(ctx, streamsSubject, &resp, req) - if err != nil { - return "", err - } - if resp.Error != nil { - return "", resp.Error - } - if len(resp.Streams) == 0 { - return "", ErrStreamNotFound - } - - return resp.Streams[0], nil -} - -// Name returns a channel allowing retrieval of stream names returned by [StreamNames] -func (s *streamLister) Name() <-chan string { - return s.names -} - -// infos fetches the next [StreamInfo] page -func (s *streamLister) streamInfos(ctx context.Context, streamsReq streamsRequest) ([]*StreamInfo, error) { - if s.pageInfo != nil && s.offset >= s.pageInfo.Total { - return nil, ErrEndOfData - } - - req := streamsRequest{ - apiPagedRequest: apiPagedRequest{ - Offset: s.offset, - }, - Subject: streamsReq.Subject, - } - reqJSON, err := json.Marshal(req) - if err != nil { - return nil, err - } - - slSubj := apiSubj(s.js.apiPrefix, apiStreamListT) - var resp streamListResponse - _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, reqJSON) - if err != nil { - return nil, err - } - if resp.Error != nil { - return nil, resp.Error - } - - s.pageInfo = &resp.apiPaged - s.offset += len(resp.Streams) - return resp.Streams, nil -} - -// streamNames fetches the next stream names page -func (s *streamLister) streamNames(ctx context.Context, streamsReq streamsRequest) ([]string, error) { - if s.pageInfo != nil && s.offset >= s.pageInfo.Total { - return nil, ErrEndOfData - } - - req := streamsRequest{ - apiPagedRequest: apiPagedRequest{ - Offset: s.offset, - }, - Subject: 
streamsReq.Subject, - } - reqJSON, err := json.Marshal(req) - if err != nil { - return nil, err - } - - slSubj := apiSubj(s.js.apiPrefix, apiStreams) - var resp streamNamesResponse - _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, reqJSON) - if err != nil { - return nil, err - } - if resp.Error != nil { - return nil, resp.Error - } - - s.pageInfo = &resp.apiPaged - s.offset += len(resp.Streams) - return resp.Streams, nil -} - -// wrapContextWithoutDeadline wraps context without deadline with default timeout. -// If deadline is already set, it will be returned as is, and cancel() will be nil. -// Caller should check if cancel() is nil before calling it. -func wrapContextWithoutDeadline(ctx context.Context) (context.Context, context.CancelFunc) { - if _, ok := ctx.Deadline(); ok { - return ctx, nil - } - return context.WithTimeout(ctx, defaultAPITimeout) -} - -func (js *jetStream) cleanupReplySub() { - if js.publisher == nil { - return - } - js.publisher.Lock() - if js.publisher.replySub != nil { - js.publisher.replySub.Unsubscribe() - js.publisher.replySub = nil - } - if js.publisher.connStatusCh != nil { - close(js.publisher.connStatusCh) - js.publisher.connStatusCh = nil - } - js.publisher.Unlock() -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go deleted file mode 100644 index a08d203..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go +++ /dev/null @@ -1,408 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "fmt" - "time" -) - -type pullOptFunc func(*consumeOpts) error - -func (fn pullOptFunc) configureConsume(opts *consumeOpts) error { - return fn(opts) -} - -func (fn pullOptFunc) configureMessages(opts *consumeOpts) error { - return fn(opts) -} - -// WithClientTrace enables request/response API calls tracing. -func WithClientTrace(ct *ClientTrace) JetStreamOpt { - return func(opts *jsOpts) error { - opts.clientTrace = ct - return nil - } -} - -// WithPublishAsyncErrHandler sets error handler for async message publish. -func WithPublishAsyncErrHandler(cb MsgErrHandler) JetStreamOpt { - return func(opts *jsOpts) error { - opts.publisherOpts.aecb = cb - return nil - } -} - -// WithPublishAsyncMaxPending sets the maximum outstanding async publishes that -// can be inflight at one time. 
-func WithPublishAsyncMaxPending(max int) JetStreamOpt { - return func(opts *jsOpts) error { - if max < 1 { - return fmt.Errorf("%w: max ack pending should be >= 1", ErrInvalidOption) - } - opts.publisherOpts.maxpa = max - return nil - } -} - -// WithPurgeSubject sets a specific subject for which messages on a stream will -// be purged -func WithPurgeSubject(subject string) StreamPurgeOpt { - return func(req *StreamPurgeRequest) error { - req.Subject = subject - return nil - } -} - -// WithPurgeSequence is used to set a specific sequence number up to which (but -// not including) messages will be purged from a stream Can be combined with -// [WithPurgeSubject] option, but not with [WithPurgeKeep] -func WithPurgeSequence(sequence uint64) StreamPurgeOpt { - return func(req *StreamPurgeRequest) error { - if req.Keep != 0 { - return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption) - } - req.Sequence = sequence - return nil - } -} - -// WithPurgeKeep sets the number of messages to be kept in the stream after -// purge. Can be combined with [WithPurgeSubject] option, but not with -// [WithPurgeSequence] -func WithPurgeKeep(keep uint64) StreamPurgeOpt { - return func(req *StreamPurgeRequest) error { - if req.Sequence != 0 { - return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption) - } - req.Keep = keep - return nil - } -} - -// WithGetMsgSubject sets the stream subject from which the message should be -// retrieved. Server will return a first message with a seq >= to the input seq -// that has the specified subject. -func WithGetMsgSubject(subject string) GetMsgOpt { - return func(req *apiMsgGetRequest) error { - req.NextFor = subject - return nil - } -} - -// PullMaxMessages limits the number of messages to be buffered in the client. -// If not provided, a default of 500 messages will be used. -// This option is exclusive with PullMaxBytes. 
-type PullMaxMessages int - -func (max PullMaxMessages) configureConsume(opts *consumeOpts) error { - if max <= 0 { - return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) - } - opts.MaxMessages = int(max) - return nil -} - -func (max PullMaxMessages) configureMessages(opts *consumeOpts) error { - if max <= 0 { - return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) - } - opts.MaxMessages = int(max) - return nil -} - -// PullExpiry sets timeout on a single pull request, waiting until at least one -// message is available. -// If not provided, a default of 30 seconds will be used. -type PullExpiry time.Duration - -func (exp PullExpiry) configureConsume(opts *consumeOpts) error { - expiry := time.Duration(exp) - if expiry < time.Second { - return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption) - } - opts.Expires = expiry - return nil -} - -func (exp PullExpiry) configureMessages(opts *consumeOpts) error { - expiry := time.Duration(exp) - if expiry < time.Second { - return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption) - } - opts.Expires = expiry - return nil -} - -// PullMaxBytes limits the number of bytes to be buffered in the client. -// If not provided, the limit is not set (max messages will be used instead). -// This option is exclusive with PullMaxMessages. -type PullMaxBytes int - -func (max PullMaxBytes) configureConsume(opts *consumeOpts) error { - if max <= 0 { - return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption) - } - opts.MaxBytes = int(max) - return nil -} - -func (max PullMaxBytes) configureMessages(opts *consumeOpts) error { - if max <= 0 { - return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption) - } - opts.MaxBytes = int(max) - return nil -} - -// PullThresholdMessages sets the message count on which Consume will trigger -// new pull request to the server. Defaults to 50% of MaxMessages. 
-type PullThresholdMessages int - -func (t PullThresholdMessages) configureConsume(opts *consumeOpts) error { - opts.ThresholdMessages = int(t) - return nil -} - -func (t PullThresholdMessages) configureMessages(opts *consumeOpts) error { - opts.ThresholdMessages = int(t) - return nil -} - -// PullThresholdBytes sets the byte count on which Consume will trigger -// new pull request to the server. Defaults to 50% of MaxBytes (if set). -type PullThresholdBytes int - -func (t PullThresholdBytes) configureConsume(opts *consumeOpts) error { - opts.ThresholdBytes = int(t) - return nil -} - -func (t PullThresholdBytes) configureMessages(opts *consumeOpts) error { - opts.ThresholdBytes = int(t) - return nil -} - -// PullHeartbeat sets the idle heartbeat duration for a pull subscription -// If a client does not receive a heartbeat message from a stream for more -// than the idle heartbeat setting, the subscription will be removed -// and error will be passed to the message handler. -// If not provided, a default PullExpiry / 2 will be used (capped at 30 seconds) -type PullHeartbeat time.Duration - -func (hb PullHeartbeat) configureConsume(opts *consumeOpts) error { - hbTime := time.Duration(hb) - if hbTime < 500*time.Millisecond || hbTime > 30*time.Second { - return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption) - } - opts.Heartbeat = hbTime - return nil -} - -func (hb PullHeartbeat) configureMessages(opts *consumeOpts) error { - hbTime := time.Duration(hb) - if hbTime < 500*time.Millisecond || hbTime > 30*time.Second { - return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption) - } - opts.Heartbeat = hbTime - return nil -} - -// StopAfter sets the number of messages after which the consumer is -// automatically stopped and no more messages are pulled from the server. 
-type StopAfter int - -func (nMsgs StopAfter) configureConsume(opts *consumeOpts) error { - if nMsgs <= 0 { - return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption) - } - opts.StopAfter = int(nMsgs) - return nil -} - -func (nMsgs StopAfter) configureMessages(opts *consumeOpts) error { - if nMsgs <= 0 { - return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption) - } - opts.StopAfter = int(nMsgs) - return nil -} - -// ConsumeErrHandler sets custom error handler invoked when an error was -// encountered while consuming messages It will be invoked for both terminal -// (Consumer Deleted, invalid request body) and non-terminal (e.g. missing -// heartbeats) errors. -func ConsumeErrHandler(cb ConsumeErrHandlerFunc) PullConsumeOpt { - return pullOptFunc(func(cfg *consumeOpts) error { - cfg.ErrHandler = cb - return nil - }) -} - -// WithMessagesErrOnMissingHeartbeat sets whether a missing heartbeat error -// should be reported when calling [MessagesContext.Next] (Default: true). -func WithMessagesErrOnMissingHeartbeat(hbErr bool) PullMessagesOpt { - return pullOptFunc(func(cfg *consumeOpts) error { - cfg.ReportMissingHeartbeats = hbErr - return nil - }) -} - -// FetchMaxWait sets custom timeout for fetching predefined batch of messages. -// -// If not provided, a default of 30 seconds will be used. -func FetchMaxWait(timeout time.Duration) FetchOpt { - return func(req *pullRequest) error { - if timeout <= 0 { - return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) - } - req.Expires = timeout - return nil - } -} - -// FetchHeartbeat sets custom heartbeat for individual fetch request. If a -// client does not receive a heartbeat message from a stream for more than 2 -// times the idle heartbeat setting, Fetch will return [ErrNoHeartbeat]. -// -// Heartbeat value has to be lower than FetchMaxWait / 2. 
-// -// If not provided, heartbeat will is set to 5s for requests with FetchMaxWait > 10s -// and disabled otherwise. -func FetchHeartbeat(hb time.Duration) FetchOpt { - return func(req *pullRequest) error { - if hb <= 0 { - return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) - } - req.Heartbeat = hb - return nil - } -} - -// WithDeletedDetails can be used to display the information about messages -// deleted from a stream on a stream info request -func WithDeletedDetails(deletedDetails bool) StreamInfoOpt { - return func(req *streamInfoRequest) error { - req.DeletedDetails = deletedDetails - return nil - } -} - -// WithSubjectFilter can be used to display the information about messages -// stored on given subjects. -// NOTE: if the subject filter matches over 100k -// subjects, this will result in multiple requests to the server to retrieve all -// the information, and all of the returned subjects will be kept in memory. -func WithSubjectFilter(subject string) StreamInfoOpt { - return func(req *streamInfoRequest) error { - req.SubjectFilter = subject - return nil - } -} - -// WithStreamListSubject can be used to filter results of ListStreams and -// StreamNames requests to only streams that have given subject in their -// configuration. -func WithStreamListSubject(subject string) StreamListOpt { - return func(req *streamsRequest) error { - req.Subject = subject - return nil - } -} - -// WithMsgID sets the message ID used for deduplication. -func WithMsgID(id string) PublishOpt { - return func(opts *pubOpts) error { - opts.id = id - return nil - } -} - -// WithExpectStream sets the expected stream the message should be published to. -// If the message is published to a different stream server will reject the -// message and publish will fail. 
-func WithExpectStream(stream string) PublishOpt { - return func(opts *pubOpts) error { - opts.stream = stream - return nil - } -} - -// WithExpectLastSequence sets the expected sequence number the last message -// on a stream should have. If the last message has a different sequence number -// server will reject the message and publish will fail. -func WithExpectLastSequence(seq uint64) PublishOpt { - return func(opts *pubOpts) error { - opts.lastSeq = &seq - return nil - } -} - -// WithExpectLastSequencePerSubject sets the expected sequence number the last -// message on a subject the message is published to. If the last message on a -// subject has a different sequence number server will reject the message and -// publish will fail. -func WithExpectLastSequencePerSubject(seq uint64) PublishOpt { - return func(opts *pubOpts) error { - opts.lastSubjectSeq = &seq - return nil - } -} - -// WithExpectLastMsgID sets the expected message ID the last message on a stream -// should have. If the last message has a different message ID server will -// reject the message and publish will fail. -func WithExpectLastMsgID(id string) PublishOpt { - return func(opts *pubOpts) error { - opts.lastMsgID = id - return nil - } -} - -// WithRetryWait sets the retry wait time when ErrNoResponders is encountered. -// Defaults to 250ms. -func WithRetryWait(dur time.Duration) PublishOpt { - return func(opts *pubOpts) error { - if dur <= 0 { - return fmt.Errorf("%w: retry wait should be more than 0", ErrInvalidOption) - } - opts.retryWait = dur - return nil - } -} - -// WithRetryAttempts sets the retry number of attempts when ErrNoResponders is -// encountered. 
Defaults to 2 -func WithRetryAttempts(num int) PublishOpt { - return func(opts *pubOpts) error { - if num < 0 { - return fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidOption) - } - opts.retryAttempts = num - return nil - } -} - -// WithStallWait sets the max wait when the producer becomes stall producing -// messages. If a publish call is blocked for this long, ErrTooManyStalledMsgs -// is returned. -func WithStallWait(ttl time.Duration) PublishOpt { - return func(opts *pubOpts) error { - if ttl <= 0 { - return fmt.Errorf("%w: stall wait should be more than 0", ErrInvalidOption) - } - opts.stallWait = ttl - return nil - } -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv.go deleted file mode 100644 index 42a86c5..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv.go +++ /dev/null @@ -1,1338 +0,0 @@ -// Copyright 2023-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "context" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nats.go/internal/parser" -) - -type ( - // KeyValueManager is used to manage KeyValue stores. It provides methods to - // create, delete, and retrieve KeyValue stores. 
- KeyValueManager interface { - // KeyValue will lookup and bind to an existing KeyValue store. - // - // If the KeyValue store with given name does not exist, - // ErrBucketNotFound will be returned. - KeyValue(ctx context.Context, bucket string) (KeyValue, error) - - // CreateKeyValue will create a KeyValue store with the given - // configuration. - // - // If a KeyValue store with the same name already exists and the - // configuration is different, ErrBucketExists will be returned. - CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) - - // UpdateKeyValue will update an existing KeyValue store with the given - // configuration. - // - // If a KeyValue store with the given name does not exist, ErrBucketNotFound - // will be returned. - UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) - - // CreateOrUpdateKeyValue will create a KeyValue store if it does not - // exist or update an existing KeyValue store with the given - // configuration (if possible). - CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) - - // DeleteKeyValue will delete this KeyValue store. - // - // If the KeyValue store with given name does not exist, - // ErrBucketNotFound will be returned. - DeleteKeyValue(ctx context.Context, bucket string) error - - // KeyValueStoreNames is used to retrieve a list of key value store - // names. It returns a KeyValueNamesLister exposing a channel to read - // the names from. The lister will always close the channel when done - // (either all names have been read or an error occurred) and therefore - // can be used in range loops. - KeyValueStoreNames(ctx context.Context) KeyValueNamesLister - - // KeyValueStores is used to retrieve a list of key value store - // statuses. It returns a KeyValueLister exposing a channel to read the - // statuses from. 
The lister will always close the channel when done - // (either all statuses have been read or an error occurred) and - // therefore can be used in range loops. - KeyValueStores(ctx context.Context) KeyValueLister - } - - // KeyValue contains methods to operate on a KeyValue store. - // Using the KeyValue interface, it is possible to: - // - // - Get, Put, Create, Update, Delete and Purge a key - // - Watch for updates to keys - // - List all keys - // - Retrieve historical values for a key - // - Retrieve status and configuration of a key value bucket - // - Purge all delete markers - // - Close the KeyValue store - KeyValue interface { - // Get returns the latest value for the key. If the key does not exist, - // ErrKeyNotFound will be returned. - Get(ctx context.Context, key string) (KeyValueEntry, error) - - // GetRevision returns a specific revision value for the key. If the key - // does not exist or the provided revision does not exists, - // ErrKeyNotFound will be returned. - GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) - - // Put will place the new value for the key into the store. If the key - // does not exist, it will be created. If the key exists, the value will - // be updated. - // - // A key has to consist of alphanumeric characters, dashes, underscores, - // equal signs, and dots. - Put(ctx context.Context, key string, value []byte) (uint64, error) - - // PutString will place the string for the key into the store. If the - // key does not exist, it will be created. If the key exists, the value - // will be updated. - // - // A key has to consist of alphanumeric characters, dashes, underscores, - // equal signs, and dots. - PutString(ctx context.Context, key string, value string) (uint64, error) - - // Create will add the key/value pair if it does not exist. If the key - // already exists, ErrKeyExists will be returned. 
- // - // A key has to consist of alphanumeric characters, dashes, underscores, - // equal signs, and dots. - Create(ctx context.Context, key string, value []byte) (uint64, error) - - // Update will update the value if the latest revision matches. - // If the provided revision is not the latest, Update will return an error. - Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error) - - // Delete will place a delete marker and leave all revisions. A history - // of a deleted key can still be retrieved by using the History method - // or a watch on the key. [Delete] is a non-destructive operation and - // will not remove any previous revisions from the underlying stream. - // - // [LastRevision] option can be specified to only perform delete if the - // latest revision the provided one. - Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error - - // Purge will place a delete marker and remove all previous revisions. - // Only the latest revision will be preserved (with a delete marker). - // Unlike [Delete], Purge is a destructive operation and will remove all - // previous revisions from the underlying streams. - // - // [LastRevision] option can be specified to only perform purge if the - // latest revision the provided one. - Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error - - // Watch for any updates to keys that match the keys argument which - // could include wildcards. By default, the watcher will send the latest - // value for each key and all future updates. Watch will send a nil - // entry when it has received all initial values. There are a few ways - // to configure the watcher: - // - // - IncludeHistory will have the key watcher send all historical values - // for each key (up to KeyValueMaxHistory). - // - IgnoreDeletes will have the key watcher not pass any keys with - // delete markers. 
- // - UpdatesOnly will have the key watcher only pass updates on values - // (without latest values when started). - // - MetaOnly will have the key watcher retrieve only the entry meta - // data, not the entry value. - // - ResumeFromRevision instructs the key watcher to resume from a - // specific revision number. - Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error) - - // WatchAll will watch for any updates to all keys. It can be configured - // with the same options as Watch. - WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error) - - // Keys will return all keys. DEPRECATED: Use ListKeys instead to avoid - // memory issues. - Keys(ctx context.Context, opts ...WatchOpt) ([]string, error) - - // ListKeys will return KeyLister, allowing to retrieve all keys from - // the key value store in a streaming fashion (on a channel). - ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error) - - // History will return all historical values for the key (up to - // KeyValueMaxHistory). - History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error) - - // Bucket returns the KV store name. - Bucket() string - - // PurgeDeletes will remove all current delete markers. It can be - // configured using DeleteMarkersOlderThan option to only remove delete - // markers older than a certain duration. - // - // [PurgeDeletes] is a destructive operation and will remove all entries - // with delete markers from the underlying stream. - PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error - - // Status retrieves the status and configuration of a bucket. - Status(ctx context.Context) (KeyValueStatus, error) - } - - // KeyValueConfig is the configuration for a KeyValue store. - KeyValueConfig struct { - // Bucket is the name of the KeyValue store. Bucket name has to be - // unique and can only contain alphanumeric characters, dashes, and - // underscores. 
- Bucket string - - // Description is an optional description for the KeyValue store. - Description string - - // MaxValueSize is the maximum size of a value in bytes. If not - // specified, the default is -1 (unlimited). - MaxValueSize int32 - - // History is the number of historical values to keep per key. If not - // specified, the default is 1. Max is 64. - History uint8 - - // TTL is the expiry time for keys. By default, keys do not expire. - TTL time.Duration - - // MaxBytes is the maximum size in bytes of the KeyValue store. If not - // specified, the default is -1 (unlimited). - MaxBytes int64 - - // Storage is the type of storage to use for the KeyValue store. If not - // specified, the default is FileStorage. - Storage StorageType - - // Replicas is the number of replicas to keep for the KeyValue store in - // clustered jetstream. Defaults to 1, maximum is 5. - Replicas int - - // Placement is used to declare where the stream should be placed via - // tags and/or an explicit cluster name. - Placement *Placement - - // RePublish allows immediate republishing a message to the configured - // subject after it's stored. - RePublish *RePublish - - // Mirror defines the consiguration for mirroring another KeyValue - // store. - Mirror *StreamSource - - // Sources defines the configuration for sources of a KeyValue store. - Sources []*StreamSource - - // Compression sets the underlying stream compression. - // NOTE: Compression is supported for nats-server 2.10.0+ - Compression bool - } - - // KeyLister is used to retrieve a list of key value store keys. It returns - // a channel to read the keys from. The lister will always close the channel - // when done (either all keys have been read or an error occurred) and - // therefore can be used in range loops. Stop can be used to stop the lister - // when not all keys have been read. 
- KeyLister interface { - Keys() <-chan string - Stop() error - } - - // KeyValueLister is used to retrieve a list of key value stores. It returns - // a channel to read the KV store statuses from. The lister will always - // close the channel when done (either all stores have been retrieved or an - // error occurred) and therefore can be used in range loops. Stop can be - // used to stop the lister when not all KeyValue stores have been read. - KeyValueLister interface { - Status() <-chan KeyValueStatus - Error() error - } - - // KeyValueNamesLister is used to retrieve a list of key value store names. - // It returns a channel to read the KV bucket names from. The lister will - // always close the channel when done (either all stores have been retrieved - // or an error occurred) and therefore can be used in range loops. Stop can - // be used to stop the lister when not all bucket names have been read. - KeyValueNamesLister interface { - Name() <-chan string - Error() error - } - - // KeyValueStatus is run-time status about a Key-Value bucket. - KeyValueStatus interface { - // Bucket returns the name of the KeyValue store. - Bucket() string - - // Values is how many messages are in the bucket, including historical values. - Values() uint64 - - // History returns the configured history kept per key. - History() int64 - - // TTL returns the duration for which keys are kept in the bucket. - TTL() time.Duration - - // BackingStore indicates what technology is used for storage of the bucket. - // Currently only JetStream is supported. - BackingStore() string - - // Bytes returns the size of the bucket in bytes. - Bytes() uint64 - - // IsCompressed indicates if the data is compressed on disk. - IsCompressed() bool - } - - // KeyWatcher is what is returned when doing a watch. It can be used to - // retrieve updates to keys. If not using UpdatesOnly option, it will also - // send the latest value for each key. 
After all initial values have been - // sent, a nil entry will be sent. Stop can be used to stop the watcher and - // close the underlying channel. Watcher will not close the channel until - // Stop is called or connection is closed. - KeyWatcher interface { - Updates() <-chan KeyValueEntry - Stop() error - } - - // KeyValueEntry is a retrieved entry for Get, List or Watch. - KeyValueEntry interface { - // Bucket is the bucket the data was loaded from. - Bucket() string - - // Key is the name of the key that was retrieved. - Key() string - - // Value is the retrieved value. - Value() []byte - - // Revision is a unique sequence for this value. - Revision() uint64 - - // Created is the time the data was put in the bucket. - Created() time.Time - - // Delta is distance from the latest value (how far the current sequence - // is from the latest). - Delta() uint64 - - // Operation returns Put or Delete or Purge, depending on the manner in - // which the current revision was created. - Operation() KeyValueOp - } -) - -type ( - WatchOpt interface { - configureWatcher(opts *watchOpts) error - } - - watchOpts struct { - // Do not send delete markers to the update channel. - ignoreDeletes bool - // Include all history per subject, not just last one. - includeHistory bool - // Include only updates for keys. - updatesOnly bool - // retrieve only the meta data of the entry - metaOnly bool - // resumeFromRevision is the revision to resume from. - resumeFromRevision uint64 - } - - // KVDeleteOpt is used to configure delete and purge operations. - KVDeleteOpt interface { - configureDelete(opts *deleteOpts) error - } - - deleteOpts struct { - // Remove all previous revisions. - purge bool - - // Delete only if the latest revision matches. - revision uint64 - } - - // KVPurgeOpt is used to configure PurgeDeletes. 
- KVPurgeOpt interface { - configurePurge(opts *purgeOpts) error - } - - purgeOpts struct { - dmthr time.Duration // Delete markers threshold - } -) - -// kvs is the implementation of KeyValue -type kvs struct { - name string - streamName string - pre string - putPre string - pushJS nats.JetStreamContext - js *jetStream - stream Stream - // If true, it means that APIPrefix/Domain was set in the context - // and we need to add something to some of our high level protocols - // (such as Put, etc..) - useJSPfx bool - // To know if we can use the stream direct get API - useDirect bool -} - -// KeyValueOp represents the type of KV operation (Put, Delete, Purge). It is a -// part of KeyValueEntry. -type KeyValueOp uint8 - -// Available KeyValueOp values. -const ( - // KeyValuePut is a set on a revision which creates or updates a value for a - // key. - KeyValuePut KeyValueOp = iota - - // KeyValueDelete is a set on a revision which adds a delete marker for a - // key. - KeyValueDelete - - // KeyValuePurge is a set on a revision which removes all previous revisions - // for a key. - KeyValuePurge -) - -func (op KeyValueOp) String() string { - switch op { - case KeyValuePut: - return "KeyValuePutOp" - case KeyValueDelete: - return "KeyValueDeleteOp" - case KeyValuePurge: - return "KeyValuePurgeOp" - default: - return "Unknown Operation" - } -} - -const ( - kvBucketNamePre = "KV_" - kvBucketNameTmpl = "KV_%s" - kvSubjectsTmpl = "$KV.%s.>" - kvSubjectsPreTmpl = "$KV.%s." - kvSubjectsPreDomainTmpl = "%s.$KV.%s." - kvNoPending = "0" -) - -const ( - KeyValueMaxHistory = 64 - AllKeys = ">" - kvLatestRevision = 0 - kvop = "KV-Operation" - kvdel = "DEL" - kvpurge = "PURGE" -) - -// Regex for valid keys and buckets. 
-var ( - validBucketRe = regexp.MustCompile(`\A[a-zA-Z0-9_-]+\z`) - validKeyRe = regexp.MustCompile(`\A[-/_=\.a-zA-Z0-9]+\z`) -) - -func (js *jetStream) KeyValue(ctx context.Context, bucket string) (KeyValue, error) { - if !validBucketRe.MatchString(bucket) { - return nil, ErrInvalidBucketName - } - streamName := fmt.Sprintf(kvBucketNameTmpl, bucket) - stream, err := js.Stream(ctx, streamName) - if err != nil { - if errors.Is(err, ErrStreamNotFound) { - err = ErrBucketNotFound - } - return nil, err - } - // Do some quick sanity checks that this is a correctly formed stream for KV. - // Max msgs per subject should be > 0. - if stream.CachedInfo().Config.MaxMsgsPerSubject < 1 { - return nil, ErrBadBucket - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - - return mapStreamToKVS(js, pushJS, stream), nil -} - -func (js *jetStream) CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { - scfg, err := js.prepareKeyValueConfig(ctx, cfg) - if err != nil { - return nil, err - } - - stream, err := js.CreateStream(ctx, scfg) - if err != nil { - if errors.Is(err, ErrStreamNameAlreadyInUse) { - // errors are joined so that backwards compatibility is retained - // and previous checks for ErrStreamNameAlreadyInUse will still work. 
- err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err) - } - return nil, err - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - - return mapStreamToKVS(js, pushJS, stream), nil -} - -func (js *jetStream) UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { - scfg, err := js.prepareKeyValueConfig(ctx, cfg) - if err != nil { - return nil, err - } - - stream, err := js.UpdateStream(ctx, scfg) - if err != nil { - if errors.Is(err, ErrStreamNotFound) { - err = fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket) - } - return nil, err - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - - return mapStreamToKVS(js, pushJS, stream), nil -} - -func (js *jetStream) CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { - scfg, err := js.prepareKeyValueConfig(ctx, cfg) - if err != nil { - return nil, err - } - - stream, err := js.CreateOrUpdateStream(ctx, scfg) - if err != nil { - return nil, err - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - - return mapStreamToKVS(js, pushJS, stream), nil -} - -func (js *jetStream) prepareKeyValueConfig(ctx context.Context, cfg KeyValueConfig) (StreamConfig, error) { - if !validBucketRe.MatchString(cfg.Bucket) { - return StreamConfig{}, ErrInvalidBucketName - } - if _, err := js.AccountInfo(ctx); err != nil { - return StreamConfig{}, err - } - - // Default to 1 for history. Max is 64 for now. - history := int64(1) - if cfg.History > 0 { - if cfg.History > KeyValueMaxHistory { - return StreamConfig{}, ErrHistoryTooLarge - } - history = int64(cfg.History) - } - - replicas := cfg.Replicas - if replicas == 0 { - replicas = 1 - } - - // We will set explicitly some values so that we can do comparison - // if we get an "already in use" error and need to check if it is same. 
- maxBytes := cfg.MaxBytes - if maxBytes == 0 { - maxBytes = -1 - } - maxMsgSize := cfg.MaxValueSize - if maxMsgSize == 0 { - maxMsgSize = -1 - } - // When stream's MaxAge is not set, server uses 2 minutes as the default - // for the duplicate window. If MaxAge is set, and lower than 2 minutes, - // then the duplicate window will be set to that. If MaxAge is greater, - // we will cap the duplicate window to 2 minutes (to be consistent with - // previous behavior). - duplicateWindow := 2 * time.Minute - if cfg.TTL > 0 && cfg.TTL < duplicateWindow { - duplicateWindow = cfg.TTL - } - var compression StoreCompression - if cfg.Compression { - compression = S2Compression - } - scfg := StreamConfig{ - Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), - Description: cfg.Description, - MaxMsgsPerSubject: history, - MaxBytes: maxBytes, - MaxAge: cfg.TTL, - MaxMsgSize: maxMsgSize, - Storage: cfg.Storage, - Replicas: replicas, - Placement: cfg.Placement, - AllowRollup: true, - DenyDelete: true, - Duplicates: duplicateWindow, - MaxMsgs: -1, - MaxConsumers: -1, - AllowDirect: true, - RePublish: cfg.RePublish, - Compression: compression, - } - if cfg.Mirror != nil { - // Copy in case we need to make changes so we do not change caller's version. - m := cfg.Mirror.copy() - if !strings.HasPrefix(m.Name, kvBucketNamePre) { - m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name) - } - scfg.Mirror = m - scfg.MirrorDirect = true - } else if len(cfg.Sources) > 0 { - // For now we do not allow direct subjects for sources. If that is desired a user could use stream API directly. 
- for _, ss := range cfg.Sources { - var sourceBucketName string - if strings.HasPrefix(ss.Name, kvBucketNamePre) { - sourceBucketName = ss.Name[len(kvBucketNamePre):] - } else { - sourceBucketName = ss.Name - ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name) - } - - if ss.External == nil || sourceBucketName != cfg.Bucket { - ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}} - } - scfg.Sources = append(scfg.Sources, ss) - } - scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} - } else { - scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} - } - - return scfg, nil -} - -// DeleteKeyValue will delete this KeyValue store (JetStream stream). -func (js *jetStream) DeleteKeyValue(ctx context.Context, bucket string) error { - if !validBucketRe.MatchString(bucket) { - return ErrInvalidBucketName - } - stream := fmt.Sprintf(kvBucketNameTmpl, bucket) - if err := js.DeleteStream(ctx, stream); err != nil { - if errors.Is(err, ErrStreamNotFound) { - err = errors.Join(fmt.Errorf("%w: %s", ErrBucketNotFound, bucket), err) - } - return err - } - return nil -} - -// KeyValueStoreNames is used to retrieve a list of key value store names -func (js *jetStream) KeyValueStoreNames(ctx context.Context) KeyValueNamesLister { - res := &kvLister{ - kvNames: make(chan string), - } - l := &streamLister{js: js} - streamsReq := streamsRequest{ - Subject: fmt.Sprintf(kvSubjectsTmpl, "*"), - } - go func() { - defer close(res.kvNames) - for { - page, err := l.streamNames(ctx, streamsReq) - if err != nil && !errors.Is(err, ErrEndOfData) { - res.err = err - return - } - for _, name := range page { - if !strings.HasPrefix(name, kvBucketNamePre) { - continue - } - res.kvNames <- strings.TrimPrefix(name, kvBucketNamePre) - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - return res -} - -// KeyValueStores is used to retrieve a list of key value store 
statuses -func (js *jetStream) KeyValueStores(ctx context.Context) KeyValueLister { - res := &kvLister{ - kvs: make(chan KeyValueStatus), - } - l := &streamLister{js: js} - streamsReq := streamsRequest{ - Subject: fmt.Sprintf(kvSubjectsTmpl, "*"), - } - go func() { - defer close(res.kvs) - for { - page, err := l.streamInfos(ctx, streamsReq) - if err != nil && !errors.Is(err, ErrEndOfData) { - res.err = err - return - } - for _, info := range page { - if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { - continue - } - res.kvs <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)} - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - return res -} - -// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus -type KeyValueBucketStatus struct { - nfo *StreamInfo - bucket string -} - -// Bucket the name of the bucket -func (s *KeyValueBucketStatus) Bucket() string { return s.bucket } - -// Values is how many messages are in the bucket, including historical values -func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs } - -// History returns the configured history kept per key -func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject } - -// TTL is how long the bucket keeps values for -func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } - -// BackingStore indicates what technology is used for storage of the bucket -func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" } - -// StreamInfo is the stream info retrieved to create the status -func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo } - -// Bytes is the size of the stream -func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes } - -// IsCompressed indicates if the data is compressed on disk -func (s *KeyValueBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression } - 
-type kvLister struct { - kvs chan KeyValueStatus - kvNames chan string - err error -} - -func (kl *kvLister) Status() <-chan KeyValueStatus { - return kl.kvs -} - -func (kl *kvLister) Name() <-chan string { - return kl.kvNames -} - -func (kl *kvLister) Error() error { - return kl.err -} - -func (js *jetStream) legacyJetStream() (nats.JetStreamContext, error) { - opts := make([]nats.JSOpt, 0) - if js.apiPrefix != "" { - opts = append(opts, nats.APIPrefix(js.apiPrefix)) - } - if js.clientTrace != nil { - opts = append(opts, nats.ClientTrace{ - RequestSent: js.clientTrace.RequestSent, - ResponseReceived: js.clientTrace.ResponseReceived, - }) - } - return js.conn.JetStream(opts...) -} - -func keyValid(key string) bool { - if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { - return false - } - return validKeyRe.MatchString(key) -} - -func (kv *kvs) get(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) { - if !keyValid(key) { - return nil, ErrInvalidKey - } - - var b strings.Builder - b.WriteString(kv.pre) - b.WriteString(key) - - var m *RawStreamMsg - var err error - - if revision == kvLatestRevision { - m, err = kv.stream.GetLastMsgForSubject(ctx, b.String()) - } else { - m, err = kv.stream.GetMsg(ctx, revision) - // If a sequence was provided, just make sure that the retrieved - // message subject matches the request. - if err == nil && m.Subject != b.String() { - return nil, ErrKeyNotFound - } - } - if err != nil { - if errors.Is(err, ErrMsgNotFound) { - err = ErrKeyNotFound - } - return nil, err - } - - entry := &kve{ - bucket: kv.name, - key: key, - value: m.Data, - revision: m.Sequence, - created: m.Time, - } - - // Double check here that this is not a DEL Operation marker. 
- if len(m.Header) > 0 { - switch m.Header.Get(kvop) { - case kvdel: - entry.op = KeyValueDelete - return entry, ErrKeyDeleted - case kvpurge: - entry.op = KeyValuePurge - return entry, ErrKeyDeleted - } - } - - return entry, nil -} - -// kve is the implementation of KeyValueEntry -type kve struct { - bucket string - key string - value []byte - revision uint64 - delta uint64 - created time.Time - op KeyValueOp -} - -func (e *kve) Bucket() string { return e.bucket } -func (e *kve) Key() string { return e.key } -func (e *kve) Value() []byte { return e.value } -func (e *kve) Revision() uint64 { return e.revision } -func (e *kve) Created() time.Time { return e.created } -func (e *kve) Delta() uint64 { return e.delta } -func (e *kve) Operation() KeyValueOp { return e.op } - -// Get returns the latest value for the key. -func (kv *kvs) Get(ctx context.Context, key string) (KeyValueEntry, error) { - e, err := kv.get(ctx, key, kvLatestRevision) - if err != nil { - if errors.Is(err, ErrKeyDeleted) { - return nil, ErrKeyNotFound - } - return nil, err - } - - return e, nil -} - -// GetRevision returns a specific revision value for the key. -func (kv *kvs) GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) { - e, err := kv.get(ctx, key, revision) - if err != nil { - if errors.Is(err, ErrKeyDeleted) { - return nil, ErrKeyNotFound - } - return nil, err - } - - return e, nil -} - -// Put will place the new value for the key into the store. -func (kv *kvs) Put(ctx context.Context, key string, value []byte) (uint64, error) { - if !keyValid(key) { - return 0, ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.apiPrefix) - } - if kv.putPre != "" { - b.WriteString(kv.putPre) - } else { - b.WriteString(kv.pre) - } - b.WriteString(key) - - pa, err := kv.js.Publish(ctx, b.String(), value) - if err != nil { - return 0, err - } - return pa.Sequence, err -} - -// PutString will place the string for the key into the store. 
-func (kv *kvs) PutString(ctx context.Context, key string, value string) (uint64, error) { - return kv.Put(ctx, key, []byte(value)) -} - -// Create will add the key/value pair iff it does not exist. -func (kv *kvs) Create(ctx context.Context, key string, value []byte) (revision uint64, err error) { - v, err := kv.Update(ctx, key, value, 0) - if err == nil { - return v, nil - } - - if e, err := kv.get(ctx, key, kvLatestRevision); errors.Is(err, ErrKeyDeleted) { - return kv.Update(ctx, key, value, e.Revision()) - } - - // Check if the expected last subject sequence is not zero which implies - // the key already exists. - if errors.Is(err, ErrKeyExists) { - jserr := ErrKeyExists.(*jsError) - return 0, fmt.Errorf("%w: %s", err, jserr.message) - } - - return 0, err -} - -// Update will update the value if the latest revision matches. -func (kv *kvs) Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error) { - if !keyValid(key) { - return 0, ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.apiPrefix) - } - b.WriteString(kv.pre) - b.WriteString(key) - - m := nats.Msg{Subject: b.String(), Header: nats.Header{}, Data: value} - m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(revision, 10)) - - pa, err := kv.js.PublishMsg(ctx, &m) - if err != nil { - return 0, err - } - return pa.Sequence, err -} - -// Delete will place a delete marker and leave all revisions. -func (kv *kvs) Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error { - if !keyValid(key) { - return ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.apiPrefix) - } - if kv.putPre != "" { - b.WriteString(kv.putPre) - } else { - b.WriteString(kv.pre) - } - b.WriteString(key) - - // DEL op marker. For watch functionality. 
- m := nats.NewMsg(b.String()) - - var o deleteOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureDelete(&o); err != nil { - return err - } - } - } - - if o.purge { - m.Header.Set(kvop, kvpurge) - m.Header.Set(MsgRollup, MsgRollupSubject) - } else { - m.Header.Set(kvop, kvdel) - } - - if o.revision != 0 { - m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(o.revision, 10)) - } - - _, err := kv.js.PublishMsg(ctx, m) - return err -} - -// Purge will place a delete marker and remove all previous revisions. -func (kv *kvs) Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error { - return kv.Delete(ctx, key, append(opts, purge())...) -} - -// purge removes all previous revisions. -func purge() KVDeleteOpt { - return deleteOptFn(func(opts *deleteOpts) error { - opts.purge = true - return nil - }) -} - -// Implementation for Watch -type watcher struct { - mu sync.Mutex - updates chan KeyValueEntry - sub *nats.Subscription - initDone bool - initPending uint64 - received uint64 -} - -// Updates returns the interior channel. -func (w *watcher) Updates() <-chan KeyValueEntry { - if w == nil { - return nil - } - return w.updates -} - -// Stop will unsubscribe from the watcher. -func (w *watcher) Stop() error { - if w == nil { - return nil - } - return w.sub.Unsubscribe() -} - -// Watch for any updates to keys that match the keys argument which could include wildcards. -// Watch will send a nil entry when it has received all initial values. -func (kv *kvs) Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error) { - var o watchOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureWatcher(&o); err != nil { - return nil, err - } - } - } - - // Could be a pattern so don't check for validity as we normally do. - var b strings.Builder - b.WriteString(kv.pre) - b.WriteString(keys) - keys = b.String() - - // We will block below on placing items on the chan. That is by design. 
- w := &watcher{updates: make(chan KeyValueEntry, 256)} - - update := func(m *nats.Msg) { - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - return - } - if len(m.Subject) <= len(kv.pre) { - return - } - subj := m.Subject[len(kv.pre):] - - var op KeyValueOp - if len(m.Header) > 0 { - switch m.Header.Get(kvop) { - case kvdel: - op = KeyValueDelete - case kvpurge: - op = KeyValuePurge - } - } - delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos]) - w.mu.Lock() - defer w.mu.Unlock() - if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) { - entry := &kve{ - bucket: kv.name, - key: subj, - value: m.Data, - revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), - created: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), - delta: delta, - op: op, - } - w.updates <- entry - } - // Check if done and initial values. - if !w.initDone { - w.received++ - // We set this on the first trip through.. - if w.initPending == 0 { - w.initPending = delta - } - if w.received > w.initPending || delta == 0 { - w.initDone = true - w.updates <- nil - } - } - } - - // Used ordered consumer to deliver results. - subOpts := []nats.SubOpt{nats.BindStream(kv.streamName), nats.OrderedConsumer()} - if !o.includeHistory { - subOpts = append(subOpts, nats.DeliverLastPerSubject()) - } - if o.updatesOnly { - subOpts = append(subOpts, nats.DeliverNew()) - } - if o.metaOnly { - subOpts = append(subOpts, nats.HeadersOnly()) - } - if o.resumeFromRevision > 0 { - subOpts = append(subOpts, nats.StartSequence(o.resumeFromRevision)) - } - subOpts = append(subOpts, nats.Context(ctx)) - // Create the sub and rest of initialization under the lock. - // We want to prevent the race between this code and the - // update() callback. - w.mu.Lock() - defer w.mu.Unlock() - sub, err := kv.pushJS.Subscribe(keys, update, subOpts...) 
- if err != nil { - return nil, err - } - sub.SetClosedHandler(func(_ string) { - close(w.updates) - }) - // If there were no pending messages at the time of the creation - // of the consumer, send the marker. - // Skip if UpdatesOnly() is set, since there will never be updates initially. - if !o.updatesOnly { - initialPending, err := sub.InitialConsumerPending() - if err == nil && initialPending == 0 { - w.initDone = true - w.updates <- nil - } - } else { - // if UpdatesOnly was used, mark initialization as complete - w.initDone = true - } - w.sub = sub - return w, nil -} - -// WatchAll will invoke the callback for all updates. -func (kv *kvs) WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error) { - return kv.Watch(ctx, AllKeys, opts...) -} - -// Keys will return all keys. -func (kv *kvs) Keys(ctx context.Context, opts ...WatchOpt) ([]string, error) { - opts = append(opts, IgnoreDeletes(), MetaOnly()) - watcher, err := kv.WatchAll(ctx, opts...) - if err != nil { - return nil, err - } - defer watcher.Stop() - - var keys []string - for entry := range watcher.Updates() { - if entry == nil { - break - } - keys = append(keys, entry.Key()) - } - if len(keys) == 0 { - return nil, ErrNoKeysFound - } - return keys, nil -} - -type keyLister struct { - watcher KeyWatcher - keys chan string -} - -// Keys will return all keys. -func (kv *kvs) ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error) { - opts = append(opts, IgnoreDeletes(), MetaOnly()) - watcher, err := kv.WatchAll(ctx, opts...) 
- if err != nil { - return nil, err - } - kl := &keyLister{watcher: watcher, keys: make(chan string, 256)} - - go func() { - defer close(kl.keys) - defer watcher.Stop() - for { - select { - case entry := <-watcher.Updates(): - if entry == nil { - return - } - kl.keys <- entry.Key() - case <-ctx.Done(): - return - } - } - }() - return kl, nil -} - -func (kl *keyLister) Keys() <-chan string { - return kl.keys -} - -func (kl *keyLister) Stop() error { - return kl.watcher.Stop() -} - -// History will return all historical values for the key. -func (kv *kvs) History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error) { - opts = append(opts, IncludeHistory()) - watcher, err := kv.Watch(ctx, key, opts...) - if err != nil { - return nil, err - } - defer watcher.Stop() - - var entries []KeyValueEntry - for entry := range watcher.Updates() { - if entry == nil { - break - } - entries = append(entries, entry) - } - if len(entries) == 0 { - return nil, ErrKeyNotFound - } - return entries, nil -} - -// Bucket returns the current bucket name. -func (kv *kvs) Bucket() string { - return kv.name -} - -const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute - -// PurgeDeletes will remove all current delete markers. -func (kv *kvs) PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error { - var o purgeOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configurePurge(&o); err != nil { - return err - } - } - } - watcher, err := kv.WatchAll(ctx) - if err != nil { - return err - } - defer watcher.Stop() - - var limit time.Time - olderThan := o.dmthr - // Negative value is used to instruct to always remove markers, regardless - // of age. If set to 0 (or not set), use our default value. 
- if olderThan == 0 { - olderThan = kvDefaultPurgeDeletesMarkerThreshold - } - if olderThan > 0 { - limit = time.Now().Add(-olderThan) - } - - var deleteMarkers []KeyValueEntry - for entry := range watcher.Updates() { - if entry == nil { - break - } - if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge { - deleteMarkers = append(deleteMarkers, entry) - } - } - - var b strings.Builder - // Do actual purges here. - for _, entry := range deleteMarkers { - b.WriteString(kv.pre) - b.WriteString(entry.Key()) - purgeOpts := []StreamPurgeOpt{WithPurgeSubject(b.String())} - if olderThan > 0 && entry.Created().After(limit) { - purgeOpts = append(purgeOpts, WithPurgeKeep(1)) - } - if err := kv.stream.Purge(ctx, purgeOpts...); err != nil { - return err - } - b.Reset() - } - return nil -} - -// Status retrieves the status and configuration of a bucket -func (kv *kvs) Status(ctx context.Context) (KeyValueStatus, error) { - nfo, err := kv.stream.Info(ctx) - if err != nil { - return nil, err - } - - return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil -} - -func mapStreamToKVS(js *jetStream, pushJS nats.JetStreamContext, stream Stream) *kvs { - info := stream.CachedInfo() - bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre) - kv := &kvs{ - name: bucket, - streamName: info.Config.Name, - pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket), - js: js, - pushJS: pushJS, - stream: stream, - // Determine if we need to use the JS prefix in front of Put and Delete operations - useJSPfx: js.apiPrefix != DefaultAPIPrefix, - useDirect: info.Config.AllowDirect, - } - - // If we are mirroring, we will have mirror direct on, so just use the mirror name - // and override use - if m := info.Config.Mirror; m != nil { - bucket := strings.TrimPrefix(m.Name, kvBucketNamePre) - if m.External != nil && m.External.APIPrefix != "" { - kv.useJSPfx = false - kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) - kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, 
m.External.APIPrefix, bucket) - } else { - kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) - } - } - - return kv -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go deleted file mode 100644 index 07a2557..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "fmt" - "time" -) - -type watchOptFn func(opts *watchOpts) error - -func (opt watchOptFn) configureWatcher(opts *watchOpts) error { - return opt(opts) -} - -// IncludeHistory instructs the key watcher to include historical values as -// well (up to KeyValueMaxHistory). -func IncludeHistory() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - if opts.updatesOnly { - return fmt.Errorf("%w: include history can not be used with updates only", ErrInvalidOption) - } - opts.includeHistory = true - return nil - }) -} - -// UpdatesOnly instructs the key watcher to only include updates on values -// (without latest values when started). 
-func UpdatesOnly() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - if opts.includeHistory { - return fmt.Errorf("%w: updates only can not be used with include history", ErrInvalidOption) - } - opts.updatesOnly = true - return nil - }) -} - -// IgnoreDeletes will have the key watcher not pass any deleted keys. -func IgnoreDeletes() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - opts.ignoreDeletes = true - return nil - }) -} - -// MetaOnly instructs the key watcher to retrieve only the entry meta data, not -// the entry value. -func MetaOnly() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - opts.metaOnly = true - return nil - }) -} - -// ResumeFromRevision instructs the key watcher to resume from a specific -// revision number. -func ResumeFromRevision(revision uint64) WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - opts.resumeFromRevision = revision - return nil - }) -} - -// DeleteMarkersOlderThan indicates that delete or purge markers older than that -// will be deleted as part of [KeyValue.PurgeDeletes] operation, otherwise, only the data -// will be removed but markers that are recent will be kept. -// Note that if no option is specified, the default is 30 minutes. You can set -// this option to a negative value to instruct to always remove the markers, -// regardless of their age. -type DeleteMarkersOlderThan time.Duration - -func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { - opts.dmthr = time.Duration(ttl) - return nil -} - -type deleteOptFn func(opts *deleteOpts) error - -func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { - return opt(opts) -} - -// LastRevision deletes if the latest revision matches the provided one. If the -// provided revision is not the latest, the delete will return an error. 
-func LastRevision(revision uint64) KVDeleteOpt { - return deleteOptFn(func(opts *deleteOpts) error { - opts.revision = revision - return nil - }) -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/message.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/message.go deleted file mode 100644 index 81e1512..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/message.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "bytes" - "context" - "fmt" - "strconv" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nats.go/internal/parser" -) - -type ( - // Msg contains methods to operate on a JetStream message. Metadata, Data, - // Headers, Subject and Reply can be used to retrieve the specific parts of - // the underlying message. Ack, DoubleAck, Nak, NakWithDelay, InProgress and - // Term are various flavors of ack requests. - Msg interface { - // Metadata returns [MsgMetadata] for a JetStream message. - Metadata() (*MsgMetadata, error) - - // Data returns the message body. - Data() []byte - - // Headers returns a map of headers for a message. - Headers() nats.Header - - // Subject returns a subject on which a message was published/received. - Subject() string - - // Reply returns a reply subject for a message. 
- Reply() string - - // Ack acknowledges a message. This tells the server that the message was - // successfully processed and it can move on to the next message. - Ack() error - - // DoubleAck acknowledges a message and waits for ack reply from the server. - // While it impacts performance, it is useful for scenarios where - // message loss is not acceptable. - DoubleAck(context.Context) error - - // Nak negatively acknowledges a message. This tells the server to - // redeliver the message. - // - // Nak does not adhere to AckWait or Backoff configured on the consumer - // and triggers instant redelivery. For a delayed redelivery, use - // NakWithDelay. - Nak() error - - // NakWithDelay negatively acknowledges a message. This tells the server - // to redeliver the message after the given delay. - NakWithDelay(delay time.Duration) error - - // InProgress tells the server that this message is being worked on. It - // resets the redelivery timer on the server. - InProgress() error - - // Term tells the server to not redeliver this message, regardless of - // the value of MaxDeliver. - Term() error - - // TermWithReason tells the server to not redeliver this message, regardless of - // the value of MaxDeliver. The provided reason will be included in JetStream - // advisory event sent by the server. - // - // Note: This will only work with JetStream servers >= 2.10.4. - // For older servers, TermWithReason will be ignored by the server and the message - // will not be terminated. - TermWithReason(reason string) error - } - - // MsgMetadata is the JetStream metadata associated with received messages. - MsgMetadata struct { - // Sequence is the sequence information for the message. - Sequence SequencePair - - // NumDelivered is the number of times this message was delivered to the - // consumer. - NumDelivered uint64 - - // NumPending is the number of messages that match the consumer's - // filter, but have not been delivered yet. 
- NumPending uint64 - - // Timestamp is the time the message was originally stored on a stream. - Timestamp time.Time - - // Stream is the stream name this message is stored on. - Stream string - - // Consumer is the consumer name this message was delivered to. - Consumer string - - // Domain is the domain this message was received on. - Domain string - } - - // SequencePair includes the consumer and stream sequence numbers for a - // message. - SequencePair struct { - // Consumer is the consumer sequence number for message deliveries. This - // is the total number of messages the consumer has seen (including - // redeliveries). - Consumer uint64 `json:"consumer_seq"` - - // Stream is the stream sequence number for a message. - Stream uint64 `json:"stream_seq"` - } - - jetStreamMsg struct { - msg *nats.Msg - ackd bool - js *jetStream - sync.Mutex - } - - ackOpts struct { - nakDelay time.Duration - termReason string - } - - ackType []byte -) - -const ( - controlMsg = "100" - badRequest = "400" - noMessages = "404" - reqTimeout = "408" - maxBytesExceeded = "409" - noResponders = "503" -) - -// Headers used when publishing messages. -const ( - // MsgIdHeader is used to specify a user-defined message ID. It can be used - // e.g. for deduplication in conjunction with the Duplicates duration on - // ConsumerConfig or to provide optimistic concurrency safety together with - // [ExpectedLastMsgIDHeader]. - // - // This can be set when publishing messages using [WithMsgID] option. - MsgIDHeader = "Nats-Msg-Id" - - // ExpectedStreamHeader contains stream name and is used to assure that the - // published message is received by expected stream. Server will reject the - // message if it is not the case. - // - // This can be set when publishing messages using [WithExpectStream] option. 
- ExpectedStreamHeader = "Nats-Expected-Stream" - - // ExpectedLastSeqHeader contains the expected last sequence number of the - // stream and can be used to apply optimistic concurrency control at stream - // level. Server will reject the message if it is not the case. - // - // This can be set when publishing messages using [WithExpectLastSequence] - // option. option. - ExpectedLastSeqHeader = "Nats-Expected-Last-Sequence" - - // ExpectedLastSubjSeqHeader contains the expected last sequence number on - // the subject and can be used to apply optimistic concurrency control at - // subject level. Server will reject the message if it is not the case. - // - // This can be set when publishing messages using - // [WithExpectLastSequencePerSubject] option. - ExpectedLastSubjSeqHeader = "Nats-Expected-Last-Subject-Sequence" - - // ExpectedLastMsgIDHeader contains the expected last message ID on the - // subject and can be used to apply optimistic concurrency control at - // stream level. Server will reject the message if it is not the case. - // - // This can be set when publishing messages using [WithExpectLastMsgID] - // option. - ExpectedLastMsgIDHeader = "Nats-Expected-Last-Msg-Id" - - // MsgRollup is used to apply a purge of all prior messages in the stream - // ("all") or at the subject ("sub") before this message. - MsgRollup = "Nats-Rollup" -) - -// Headers for republished messages and direct gets. Those headers are set by -// the server and should not be set by the client. -const ( - // StreamHeader contains the stream name the message was republished from or - // the stream name the message was retrieved from using direct get. - StreamHeader = "Nats-Stream" - - // SequenceHeader contains the original sequence number of the message. - SequenceHeader = "Nats-Sequence" - - // TimeStampHeader contains the original timestamp of the message. - TimeStampHeaer = "Nats-Time-Stamp" - - // SubjectHeader contains the original subject the message was published to. 
- SubjectHeader = "Nats-Subject" - - // LastSequenceHeader contains the last sequence of the message having the - // same subject, otherwise zero if this is the first message for the - // subject. - LastSequenceHeader = "Nats-Last-Sequence" -) - -// Rollups, can be subject only or all messages. -const ( - // MsgRollupSubject is used to purge all messages before this message on the - // message subject. - MsgRollupSubject = "sub" - - // MsgRollupAll is used to purge all messages before this message on the - // stream. - MsgRollupAll = "all" -) - -var ( - ackAck ackType = []byte("+ACK") - ackNak ackType = []byte("-NAK") - ackProgress ackType = []byte("+WPI") - ackTerm ackType = []byte("+TERM") -) - -// Metadata returns [MsgMetadata] for a JetStream message. -func (m *jetStreamMsg) Metadata() (*MsgMetadata, error) { - if err := m.checkReply(); err != nil { - return nil, err - } - - tokens, err := parser.GetMetadataFields(m.msg.Reply) - if err != nil { - return nil, fmt.Errorf("%w: %s", ErrNotJSMessage, err) - } - - meta := &MsgMetadata{ - Domain: tokens[parser.AckDomainTokenPos], - NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]), - NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]), - Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), - Stream: tokens[parser.AckStreamTokenPos], - Consumer: tokens[parser.AckConsumerTokenPos], - } - meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) - meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) - return meta, nil -} - -// Data returns the message body. -func (m *jetStreamMsg) Data() []byte { - return m.msg.Data -} - -// Headers returns a map of headers for a message. -func (m *jetStreamMsg) Headers() nats.Header { - return m.msg.Header -} - -// Subject returns a subject on which a message is published. 
-func (m *jetStreamMsg) Subject() string { - return m.msg.Subject -} - -// Reply returns a reply subject for a JetStream message. -func (m *jetStreamMsg) Reply() string { - return m.msg.Reply -} - -// Ack acknowledges a message. This tells the server that the message was -// successfully processed and it can move on to the next message. -func (m *jetStreamMsg) Ack() error { - return m.ackReply(context.Background(), ackAck, false, ackOpts{}) -} - -// DoubleAck acknowledges a message and waits for ack reply from the server. -// While it impacts performance, it is useful for scenarios where -// message loss is not acceptable. -func (m *jetStreamMsg) DoubleAck(ctx context.Context) error { - return m.ackReply(ctx, ackAck, true, ackOpts{}) -} - -// Nak negatively acknowledges a message. This tells the server to -// redeliver the message. -func (m *jetStreamMsg) Nak() error { - return m.ackReply(context.Background(), ackNak, false, ackOpts{}) -} - -// NakWithDelay negatively acknowledges a message. This tells the server -// to redeliver the message after the given delay. -func (m *jetStreamMsg) NakWithDelay(delay time.Duration) error { - return m.ackReply(context.Background(), ackNak, false, ackOpts{nakDelay: delay}) -} - -// InProgress tells the server that this message is being worked on. It -// resets the redelivery timer on the server. -func (m *jetStreamMsg) InProgress() error { - return m.ackReply(context.Background(), ackProgress, false, ackOpts{}) -} - -// Term tells the server to not redeliver this message, regardless of -// the value of MaxDeliver. -func (m *jetStreamMsg) Term() error { - return m.ackReply(context.Background(), ackTerm, false, ackOpts{}) -} - -// TermWithReason tells the server to not redeliver this message, regardless of -// the value of MaxDeliver. The provided reason will be included in JetStream -// advisory event sent by the server. -// -// Note: This will only work with JetStream servers >= 2.10.4. 
-// For older servers, TermWithReason will be ignored by the server and the message -// will not be terminated. -func (m *jetStreamMsg) TermWithReason(reason string) error { - return m.ackReply(context.Background(), ackTerm, false, ackOpts{termReason: reason}) -} - -func (m *jetStreamMsg) ackReply(ctx context.Context, ackType ackType, sync bool, opts ackOpts) error { - err := m.checkReply() - if err != nil { - return err - } - - m.Lock() - if m.ackd { - m.Unlock() - return ErrMsgAlreadyAckd - } - m.Unlock() - - if sync { - var cancel context.CancelFunc - ctx, cancel = wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - } - - var body []byte - if opts.nakDelay > 0 { - body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, opts.nakDelay.Nanoseconds())) - } else if opts.termReason != "" { - body = []byte(fmt.Sprintf("%s %s", ackType, opts.termReason)) - } else { - body = ackType - } - - if sync { - _, err = m.js.conn.RequestWithContext(ctx, m.msg.Reply, body) - } else { - err = m.js.conn.Publish(m.msg.Reply, body) - } - if err != nil { - return err - } - - // Mark that the message has been acked unless it is ackProgress - // which can be sent many times. - if !bytes.Equal(ackType, ackProgress) { - m.Lock() - m.ackd = true - m.Unlock() - } - return nil -} - -func (m *jetStreamMsg) checkReply() error { - if m == nil || m.msg.Sub == nil { - return ErrMsgNotBound - } - if m.msg.Reply == "" { - return ErrMsgNoReply - } - return nil -} - -// Returns if the given message is a user message or not, and if -// checkSts() is true, returns appropriate error based on the -// content of the status (404, etc..) 
-func checkMsg(msg *nats.Msg) (bool, error) { - // If payload or no header, consider this a user message - if len(msg.Data) > 0 || len(msg.Header) == 0 { - return true, nil - } - // Look for status header - val := msg.Header.Get("Status") - descr := msg.Header.Get("Description") - // If not present, then this is considered a user message - if val == "" { - return true, nil - } - - switch val { - case badRequest: - return false, ErrBadRequest - case noResponders: - return false, nats.ErrNoResponders - case noMessages: - // 404 indicates that there are no messages. - return false, ErrNoMessages - case reqTimeout: - return false, nats.ErrTimeout - case controlMsg: - return false, nil - case maxBytesExceeded: - if strings.Contains(strings.ToLower(descr), "message size exceeds maxbytes") { - return false, ErrMaxBytesExceeded - } - if strings.Contains(strings.ToLower(descr), "consumer deleted") { - return false, ErrConsumerDeleted - } - if strings.Contains(strings.ToLower(descr), "leadership change") { - return false, ErrConsumerLeadershipChanged - } - } - return false, fmt.Errorf("nats: %s", msg.Header.Get("Description")) -} - -func parsePending(msg *nats.Msg) (int, int, error) { - msgsLeftStr := msg.Header.Get("Nats-Pending-Messages") - var msgsLeft int - var err error - if msgsLeftStr != "" { - msgsLeft, err = strconv.Atoi(msgsLeftStr) - if err != nil { - return 0, 0, fmt.Errorf("nats: invalid format of Nats-Pending-Messages") - } - } - bytesLeftStr := msg.Header.Get("Nats-Pending-Bytes") - var bytesLeft int - if bytesLeftStr != "" { - bytesLeft, err = strconv.Atoi(bytesLeftStr) - if err != nil { - return 0, 0, fmt.Errorf("nats: invalid format of Nats-Pending-Bytes") - } - } - return msgsLeft, bytesLeft, nil -} - -// toJSMsg converts core [nats.Msg] to [jetStreamMsg], exposing JetStream-specific operations -func (js *jetStream) toJSMsg(msg *nats.Msg) *jetStreamMsg { - return &jetStreamMsg{ - msg: msg, - js: js, - } -} diff --git 
a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object.go deleted file mode 100644 index 271cc22..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object.go +++ /dev/null @@ -1,1600 +0,0 @@ -// Copyright 2023-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "hash" - "io" - "net" - "os" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nats.go/internal/parser" - "github.com/nats-io/nuid" -) - -type ( - // ObjectStoreManager is used to manage object stores. It provides methods - // CRUD operations on object stores. - ObjectStoreManager interface { - // ObjectStore will look up and bind to an existing object store - // instance. - // - // If the object store with given name does not exist, ErrBucketNotFound - // will be returned. - ObjectStore(ctx context.Context, bucket string) (ObjectStore, error) - - // CreateObjectStore will create a new object store with the given - // configuration. - // - // If the object store with given name already exists, ErrBucketExists - // will be returned. 
- CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) - - // UpdateObjectStore will update an existing object store with the given - // configuration. - // - // If the object store with given name does not exist, ErrBucketNotFound - // will be returned. - UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) - - // CreateOrUpdateObjectStore will create a new object store with the given - // configuration if it does not exist, or update an existing object store - // with the given configuration. - CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) - - // DeleteObjectStore will delete the provided object store. - // - // If the object store with given name does not exist, ErrBucketNotFound - // will be returned. - DeleteObjectStore(ctx context.Context, bucket string) error - - // ObjectStoreNames is used to retrieve a list of bucket names. - // It returns an ObjectStoreNamesLister exposing a channel to receive - // the names of the object stores. - // - // The lister will always close the channel when done (either all names - // have been read or an error occurred) and therefore can be used in a - // for-range loop. - ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister - - // ObjectStores is used to retrieve a list of bucket statuses. - // It returns an ObjectStoresLister exposing a channel to receive - // the statuses of the object stores. - // - // The lister will always close the channel when done (either all statuses - // have been read or an error occurred) and therefore can be used in a - // for-range loop. - ObjectStores(ctx context.Context) ObjectStoresLister - } - - // ObjectStore contains methods to operate on an object store. - // Using the ObjectStore interface, it is possible to: - // - // - Perform CRUD operations on objects (Get, Put, Delete). 
- // Get and put expose convenience methods to work with - // byte slices, strings and files, in addition to streaming [io.Reader] - // - Get information about an object without retrieving it. - // - Update the metadata of an object. - // - Add links to other objects or object stores. - // - Watch for updates to a store - // - List information about objects in a store - // - Retrieve status and configuration of an object store. - ObjectStore interface { - // Put will place the contents from the reader into a new object. If the - // object already exists, it will be overwritten. The object name is - // required and is taken from the ObjectMeta.Name field. - // - // The reader will be read until EOF. ObjectInfo will be returned, containing - // the object's metadata, digest and instance information. - Put(ctx context.Context, obj ObjectMeta, reader io.Reader) (*ObjectInfo, error) - - // PutBytes is convenience function to put a byte slice into this object - // store under the given name. - // - // ObjectInfo will be returned, containing the object's metadata, digest - // and instance information. - PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error) - - // PutString is convenience function to put a string into this object - // store under the given name. - // - // ObjectInfo will be returned, containing the object's metadata, digest - // and instance information. - PutString(ctx context.Context, name string, data string) (*ObjectInfo, error) - - // PutFile is convenience function to put a file contents into this - // object store. The name of the object will be the path of the file. - // - // ObjectInfo will be returned, containing the object's metadata, digest - // and instance information. - PutFile(ctx context.Context, file string) (*ObjectInfo, error) - - // Get will pull the named object from the object store. If the object - // does not exist, ErrObjectNotFound will be returned. 
- // - // The returned ObjectResult will contain the object's metadata and a - // reader to read the object's contents. The reader will be closed when - // all data has been read or an error occurs. - // - // A GetObjectShowDeleted option can be supplied to return an object - // even if it was marked as deleted. - Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error) - - // GetBytes is a convenience function to pull an object from this object - // store and return it as a byte slice. - // - // If the object does not exist, ErrObjectNotFound will be returned. - // - // A GetObjectShowDeleted option can be supplied to return an object - // even if it was marked as deleted. - GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error) - - // GetString is a convenience function to pull an object from this - // object store and return it as a string. - // - // If the object does not exist, ErrObjectNotFound will be returned. - // - // A GetObjectShowDeleted option can be supplied to return an object - // even if it was marked as deleted. - GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error) - - // GetFile is a convenience function to pull an object from this object - // store and place it in a file. If the file already exists, it will be - // overwritten, otherwise it will be created. - // - // If the object does not exist, ErrObjectNotFound will be returned. - // A GetObjectShowDeleted option can be supplied to return an object - // even if it was marked as deleted. - GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error - - // GetInfo will retrieve the current information for the object, containing - // the object's metadata and instance information. - // - // If the object does not exist, ErrObjectNotFound will be returned. - // - // A GetObjectInfoShowDeleted option can be supplied to return an object - // even if it was marked as deleted. 
- GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) - - // UpdateMeta will update the metadata for the object. - // - // If the object does not exist, ErrUpdateMetaDeleted will be returned. - // If the new name is different from the old name, and an object with the - // new name already exists, ErrObjectAlreadyExists will be returned. - UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error - - // Delete will delete the named object from the object store. If the object - // does not exist, ErrObjectNotFound will be returned. If the object is - // already deleted, no error will be returned. - // - // All chunks for the object will be purged, and the object will be marked - // as deleted. - Delete(ctx context.Context, name string) error - - // AddLink will add a link to another object. A link is a reference to - // another object. The provided name is the name of the link object. - // The provided ObjectInfo is the info of the object being linked to. - // - // If an object with given name already exists, ErrObjectAlreadyExists - // will be returned. - // If object being linked to is deleted, ErrNoLinkToDeleted will be - // returned. - // If the provided object is a link, ErrNoLinkToLink will be returned. - // If the provided object is nil or the name is empty, ErrObjectRequired - // will be returned. - AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error) - - // AddBucketLink will add a link to another object store. A link is a - // reference to another object store. The provided name is the name of - // the link object. - // The provided ObjectStore is the object store being linked to. - // - // If an object with given name already exists, ErrObjectAlreadyExists - // will be returned. - // If the provided object store is nil ErrBucketRequired will be returned. 
- AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error) - - // Seal will seal the object store, no further modifications will be allowed. - Seal(ctx context.Context) error - - // Watch for any updates to objects in the store. By default, the watcher will send the latest - // info for each object and all future updates. Watch will send a nil - // entry when it has received all initial values. There are a few ways - // to configure the watcher: - // - // - IncludeHistory will have the watcher send all historical information - // for each object. - // - IgnoreDeletes will have the watcher not pass any objects with - // delete markers. - // - UpdatesOnly will have the watcher only pass updates on objects - // (without latest info when started). - Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error) - - // List will list information about objects in the store. - // - // If the object store is empty, ErrNoObjectsFound will be returned. - List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error) - - // Status retrieves the status and configuration of the bucket. - Status(ctx context.Context) (ObjectStoreStatus, error) - } - - // ObjectWatcher is what is returned when doing a watch. It can be used to - // retrieve updates to objects in a bucket. If not using UpdatesOnly option, - // it will also send the latest value for each key. After all initial values - // have been sent, a nil entry will be sent. Stop can be used to stop the - // watcher and close the underlying channel. Watcher will not close the - // channel until Stop is called or connection is closed. - ObjectWatcher interface { - Updates() <-chan *ObjectInfo - Stop() error - } - - // ObjectStoreConfig is the configuration for the object store. - ObjectStoreConfig struct { - // Bucket is the name of the object store. Bucket name has to be - // unique and can only contain alphanumeric characters, dashes, and - // underscores. 
- Bucket string `json:"bucket"` - - // Description is an optional description for the object store. - Description string `json:"description,omitempty"` - - // TTL is the maximum age of objects in the store. If an object is not - // updated within this time, it will be removed from the store. - // By default, objects do not expire. - TTL time.Duration `json:"max_age,omitempty"` - - // MaxBytes is the maximum size of the object store. If not specified, - // the default is -1 (unlimited). - MaxBytes int64 `json:"max_bytes,omitempty"` - - // Storage is the type of storage to use for the object store. If not - // specified, the default is FileStorage. - Storage StorageType `json:"storage,omitempty"` - - // Replicas is the number of replicas to keep for the object store in - // clustered jetstream. Defaults to 1, maximum is 5. - Replicas int `json:"num_replicas,omitempty"` - - // Placement is used to declare where the object store should be placed via - // tags and/or an explicit cluster name. - Placement *Placement `json:"placement,omitempty"` - - // Compression enables the underlying stream compression. - // NOTE: Compression is supported for nats-server 2.10.0+ - Compression bool `json:"compression,omitempty"` - - // Bucket-specific metadata - // NOTE: Metadata requires nats-server v2.10.0+ - Metadata map[string]string `json:"metadata,omitempty"` - } - - // ObjectStoresLister is used to retrieve a list of object stores. It returns - // a channel to read the bucket store statuses from. The lister will always - // close the channel when done (either all stores have been retrieved or an - // error occurred) and therefore can be used in range loops. Stop can be - // used to stop the lister when not all object stores have been read. - ObjectStoresLister interface { - Status() <-chan ObjectStoreStatus - Error() error - } - - // ObjectStoreNamesLister is used to retrieve a list of object store names. - // It returns a channel to read the bucket names from. 
The lister will - // always close the channel when done (either all stores have been retrieved - // or an error occurred) and therefore can be used in range loops. Stop can - // be used to stop the lister when not all bucket names have been read. - ObjectStoreNamesLister interface { - Name() <-chan string - Error() error - } - - // ObjectStoreStatus is run-time status about a bucket. - ObjectStoreStatus interface { - // Bucket returns the name of the object store. - Bucket() string - - // Description is the description supplied when creating the bucket. - Description() string - - // TTL indicates how long objects are kept in the bucket. - TTL() time.Duration - - // Storage indicates the underlying JetStream storage technology used to - // store data. - Storage() StorageType - - // Replicas indicates how many storage replicas are kept for the data in - // the bucket. - Replicas() int - - // Sealed indicates the stream is sealed and cannot be modified in any - // way. - Sealed() bool - - // Size is the combined size of all data in the bucket including - // metadata, in bytes. - Size() uint64 - - // BackingStore indicates what technology is used for storage of the - // bucket. Currently only JetStream is supported. - BackingStore() string - - // Metadata is the user supplied metadata for the bucket. - Metadata() map[string]string - - // IsCompressed indicates if the data is compressed on disk. - IsCompressed() bool - } - - // ObjectMetaOptions is used to set additional options when creating an object. - ObjectMetaOptions struct { - // Link contains information about a link to another object or object store. - // It should not be set manually, but rather by using the AddLink or - // AddBucketLink methods. - Link *ObjectLink `json:"link,omitempty"` - - // ChunkSize is the maximum size of each chunk in bytes. If not specified, - // the default is 128k. - ChunkSize uint32 `json:"max_chunk_size,omitempty"` - } - - // ObjectMeta is high level information about an object. 
- ObjectMeta struct { - // Name is the name of the object. The name is required when adding an - // object and has to be unique within the object store. - Name string `json:"name"` - - // Description is an optional description for the object. - Description string `json:"description,omitempty"` - - // Headers is an optional set of user-defined headers for the object. - Headers nats.Header `json:"headers,omitempty"` - - // Metadata is the user supplied metadata for the object. - Metadata map[string]string `json:"metadata,omitempty"` - - // Additional options for the object. - Opts *ObjectMetaOptions `json:"options,omitempty"` - } - - // ObjectInfo contains ObjectMeta and additional information about an - // object. - ObjectInfo struct { - // ObjectMeta contains high level information about the object. - ObjectMeta - - // Bucket is the name of the object store. - Bucket string `json:"bucket"` - - // NUID is the unique identifier for the object set when putting the - // object into the store. - NUID string `json:"nuid"` - - // Size is the size of the object in bytes. It only includes the size of - // the object itself, not the metadata. - Size uint64 `json:"size"` - - // ModTime is the last modification time of the object. - ModTime time.Time `json:"mtime"` - - // Chunks is the number of chunks the object is split into. Maximum size - // of each chunk can be specified in ObjectMetaOptions. - Chunks uint32 `json:"chunks"` - - // Digest is the SHA-256 digest of the object. It is used to verify the - // integrity of the object. - Digest string `json:"digest,omitempty"` - - // Deleted indicates if the object is marked as deleted. - Deleted bool `json:"deleted,omitempty"` - } - - // ObjectLink is used to embed links to other buckets and objects. - ObjectLink struct { - // Bucket is the name of the object store the link is pointing to. - Bucket string `json:"bucket"` - - // Name can be used to link to a single object. 
- // If empty means this is a link to the whole store, like a directory. - Name string `json:"name,omitempty"` - } - - // ObjectResult will return the object info and a reader to read the object's - // contents. The reader will be closed when all data has been read or an - // error occurs. - ObjectResult interface { - io.ReadCloser - Info() (*ObjectInfo, error) - Error() error - } - - // GetObjectOpt is used to set additional options when getting an object. - GetObjectOpt func(opts *getObjectOpts) error - - // GetObjectInfoOpt is used to set additional options when getting object info. - GetObjectInfoOpt func(opts *getObjectInfoOpts) error - - // ListObjectsOpt is used to set additional options when listing objects. - ListObjectsOpt func(opts *listObjectOpts) error - - getObjectOpts struct { - // Include deleted object in the result. - showDeleted bool - } - - getObjectInfoOpts struct { - // Include deleted object in the result. - showDeleted bool - } - - listObjectOpts struct { - // Include deleted objects in the result channel. - showDeleted bool - } - - obs struct { - name string - streamName string - stream Stream - pushJS nats.JetStreamContext - js *jetStream - } - - // ObjectResult impl. - objResult struct { - sync.Mutex - info *ObjectInfo - r io.ReadCloser - err error - ctx context.Context - digest hash.Hash - } -) - -const ( - objNameTmpl = "OBJ_%s" // OBJ_ // stream name - objAllChunksPreTmpl = "$O.%s.C.>" // $O..C.> // chunk stream subject - objAllMetaPreTmpl = "$O.%s.M.>" // $O..M.> // meta stream subject - objChunksPreTmpl = "$O.%s.C.%s" // $O..C. // chunk message subject - objMetaPreTmpl = "$O.%s.M.%s" // $O..M. 
// meta message subject - objNoPending = "0" - objDefaultChunkSize = uint32(128 * 1024) // 128k - objDigestType = "SHA-256=" - objDigestTmpl = objDigestType + "%s" -) - -func (js *jetStream) CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) { - scfg, err := js.prepareObjectStoreConfig(ctx, cfg) - if err != nil { - return nil, err - } - - stream, err := js.CreateStream(ctx, scfg) - if err != nil { - if errors.Is(err, ErrStreamNameAlreadyInUse) { - // errors are joined so that backwards compatibility is retained - // and previous checks for ErrStreamNameAlreadyInUse will still work. - err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err) - } - return nil, err - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - - return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil -} - -func (js *jetStream) UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) { - scfg, err := js.prepareObjectStoreConfig(ctx, cfg) - if err != nil { - return nil, err - } - - // Attempt to update the stream. 
- stream, err := js.UpdateStream(ctx, scfg) - if err != nil { - if errors.Is(err, ErrStreamNotFound) { - return nil, fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket) - } - return nil, err - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - - return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil -} - -func (js *jetStream) CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) { - scfg, err := js.prepareObjectStoreConfig(ctx, cfg) - if err != nil { - return nil, err - } - - stream, err := js.CreateOrUpdateStream(ctx, scfg) - if err != nil { - return nil, err - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - - return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil -} - -func (js *jetStream) prepareObjectStoreConfig(ctx context.Context, cfg ObjectStoreConfig) (StreamConfig, error) { - if !validBucketRe.MatchString(cfg.Bucket) { - return StreamConfig{}, ErrInvalidStoreName - } - - name := cfg.Bucket - chunks := fmt.Sprintf(objAllChunksPreTmpl, name) - meta := fmt.Sprintf(objAllMetaPreTmpl, name) - - // We will set explicitly some values so that we can do comparison - // if we get an "already in use" error and need to check if it is same. - // See kv - replicas := cfg.Replicas - if replicas == 0 { - replicas = 1 - } - maxBytes := cfg.MaxBytes - if maxBytes == 0 { - maxBytes = -1 - } - var compression StoreCompression - if cfg.Compression { - compression = S2Compression - } - scfg := StreamConfig{ - Name: fmt.Sprintf(objNameTmpl, name), - Description: cfg.Description, - Subjects: []string{chunks, meta}, - MaxAge: cfg.TTL, - MaxBytes: maxBytes, - Storage: cfg.Storage, - Replicas: replicas, - Placement: cfg.Placement, - Discard: DiscardNew, - AllowRollup: true, - AllowDirect: true, - Metadata: cfg.Metadata, - Compression: compression, - } - - return scfg, nil -} - -// ObjectStore will look up and bind to an existing object store instance. 
-func (js *jetStream) ObjectStore(ctx context.Context, bucket string) (ObjectStore, error) { - if !validBucketRe.MatchString(bucket) { - return nil, ErrInvalidStoreName - } - - streamName := fmt.Sprintf(objNameTmpl, bucket) - stream, err := js.Stream(ctx, streamName) - if err != nil { - if errors.Is(err, ErrStreamNotFound) { - err = ErrBucketNotFound - } - return nil, err - } - pushJS, err := js.legacyJetStream() - if err != nil { - return nil, err - } - return mapStreamToObjectStore(js, pushJS, bucket, stream), nil -} - -// DeleteObjectStore will delete the underlying stream for the named object. -func (js *jetStream) DeleteObjectStore(ctx context.Context, bucket string) error { - stream := fmt.Sprintf(objNameTmpl, bucket) - return js.DeleteStream(ctx, stream) -} - -func encodeName(name string) string { - return base64.URLEncoding.EncodeToString([]byte(name)) -} - -// Put will place the contents from the reader into this object-store. -func (obs *obs) Put(ctx context.Context, meta ObjectMeta, r io.Reader) (*ObjectInfo, error) { - if meta.Name == "" { - return nil, ErrBadObjectMeta - } - - if meta.Opts == nil { - meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize} - } else if meta.Opts.Link != nil { - return nil, ErrLinkNotAllowed - } else if meta.Opts.ChunkSize == 0 { - meta.Opts.ChunkSize = objDefaultChunkSize - } - - // Create the new nuid so chunks go on a new subject if the name is re-used - newnuid := nuid.Next() - - // These will be used in more than one place - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid) - - // Grab existing meta info (einfo). 
Ok to be found or not found, any other error is a problem - // Chunks on the old nuid can be cleaned up at the end - einfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name - if err != nil && err != ErrObjectNotFound { - return nil, err - } - - // For async error handling - var perr error - var mu sync.Mutex - setErr := func(err error) { - mu.Lock() - defer mu.Unlock() - perr = err - } - getErr := func() error { - mu.Lock() - defer mu.Unlock() - return perr - } - - // Create our own JS context to handle errors etc. - pubJS, err := New(obs.js.conn, WithPublishAsyncErrHandler(func(js JetStream, _ *nats.Msg, err error) { setErr(err) })) - if err != nil { - return nil, err - } - - defer pubJS.(*jetStream).cleanupReplySub() - - purgePartial := func() { - // wait until all pubs are complete or up to default timeout before attempting purge - select { - case <-pubJS.PublishAsyncComplete(): - case <-ctx.Done(): - } - _ = obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj)) - } - - m, h := nats.NewMsg(chunkSubj), sha256.New() - chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0) - - // set up the info object. The chunk upload sets the size and digest - info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: meta} - - for r != nil { - if ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - err = ctx.Err() - } else { - err = nats.ErrTimeout - } - default: - } - if err != nil { - purgePartial() - return nil, err - } - } - - // Actual read. - // TODO(dlc) - Deadline? - n, readErr := r.Read(chunk) - - // Handle all non EOF errors - if readErr != nil && readErr != io.EOF { - purgePartial() - return nil, readErr - } - - // Add chunk only if we received data - if n > 0 { - // Chunk processing. - m.Data = chunk[:n] - h.Write(m.Data) - - // Send msg itself. 
- if _, err := pubJS.PublishMsgAsync(m); err != nil { - purgePartial() - return nil, err - } - if err := getErr(); err != nil { - purgePartial() - return nil, err - } - // Update totals. - sent++ - total += uint64(n) - } - - // EOF Processing. - if readErr == io.EOF { - // Place meta info. - info.Size, info.Chunks = uint64(total), uint32(sent) - info.Digest = GetObjectDigestValue(h) - break - } - } - - // Prepare the meta message - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name)) - mm := nats.NewMsg(metaSubj) - mm.Header.Set(MsgRollup, MsgRollupSubject) - mm.Data, err = json.Marshal(info) - if err != nil { - if r != nil { - purgePartial() - } - return nil, err - } - - // Publish the meta message. - _, err = pubJS.PublishMsgAsync(mm) - if err != nil { - if r != nil { - purgePartial() - } - return nil, err - } - - // Wait for all to be processed. - select { - case <-pubJS.PublishAsyncComplete(): - if err := getErr(); err != nil { - if r != nil { - purgePartial() - } - return nil, err - } - case <-ctx.Done(): - return nil, nats.ErrTimeout - } - - info.ModTime = time.Now().UTC() // This time is not actually the correct time - - // Delete any original chunks. - if einfo != nil && !einfo.Deleted { - echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID) - _ = obs.stream.Purge(ctx, WithPurgeSubject(echunkSubj)) - } - - // TODO would it be okay to do this to return the info with the correct time? - // With the understanding that it is an extra call to the server. - // Otherwise the time the user gets back is the client time, not the server time. 
- // return obs.GetInfo(info.Name) - - return info, nil -} - -// GetObjectDigestValue calculates the base64 value of hashed data -func GetObjectDigestValue(data hash.Hash) string { - sha := data.Sum(nil) - return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:])) -} - -// DecodeObjectDigest decodes base64 hash -func DecodeObjectDigest(data string) ([]byte, error) { - digest := strings.SplitN(data, "=", 2) - if len(digest) != 2 { - return nil, ErrInvalidDigestFormat - } - return base64.URLEncoding.DecodeString(digest[1]) -} - -func (info *ObjectInfo) isLink() bool { - return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil -} - -// Get will pull the object from the underlying stream. -func (obs *obs) Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error) { - var o getObjectOpts - for _, opt := range opts { - if opt != nil { - if err := opt(&o); err != nil { - return nil, err - } - } - } - infoOpts := make([]GetObjectInfoOpt, 0) - if o.showDeleted { - infoOpts = append(infoOpts, GetObjectInfoShowDeleted()) - } - - // Grab meta info. - info, err := obs.GetInfo(ctx, name, infoOpts...) - if err != nil { - return nil, err - } - if info.NUID == "" { - return nil, ErrBadObjectMeta - } - - // Check for object links. If single objects we do a pass through. - if info.isLink() { - if info.ObjectMeta.Opts.Link.Name == "" { - return nil, ErrCantGetBucket - } - - // is the link in the same bucket? 
- lbuck := info.ObjectMeta.Opts.Link.Bucket - if lbuck == obs.name { - return obs.Get(ctx, info.ObjectMeta.Opts.Link.Name) - } - - // different bucket - lobs, err := obs.js.ObjectStore(ctx, lbuck) - if err != nil { - return nil, err - } - return lobs.Get(ctx, info.ObjectMeta.Opts.Link.Name) - } - - result := &objResult{info: info, ctx: ctx} - if info.Size == 0 { - return result, nil - } - - pr, pw := net.Pipe() - result.r = pr - - gotErr := func(m *nats.Msg, err error) { - pw.Close() - m.Sub.Unsubscribe() - result.setErr(err) - } - - // For calculating sum256 - result.digest = sha256.New() - - processChunk := func(m *nats.Msg) { - var err error - if ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - err = ctx.Err() - } else { - err = nats.ErrTimeout - } - default: - } - if err != nil { - gotErr(m, err) - return - } - } - - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - gotErr(m, err) - return - } - - // Write to our pipe. - for b := m.Data; len(b) > 0; { - n, err := pw.Write(b) - if err != nil { - gotErr(m, err) - return - } - b = b[n:] - } - // Update sha256 - result.digest.Write(m.Data) - - // Check if we are done. - if tokens[parser.AckNumPendingTokenPos] == objNoPending { - pw.Close() - m.Sub.Unsubscribe() - } - } - - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) - _, err = obs.pushJS.Subscribe(chunkSubj, processChunk, nats.OrderedConsumer(), nats.Context(ctx)) - if err != nil { - return nil, err - } - - return result, nil -} - -// Delete will delete the object. -func (obs *obs) Delete(ctx context.Context, name string) error { - // Grab meta info. 
- info, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted()) - if err != nil { - return err - } - if info.NUID == "" { - return ErrBadObjectMeta - } - - // Place a rollup delete marker and publish the info - info.Deleted = true - info.Size, info.Chunks, info.Digest = 0, 0, "" - - if err = publishMeta(ctx, info, obs.js); err != nil { - return err - } - - // Purge chunks for the object. - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) - return obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj)) -} - -func publishMeta(ctx context.Context, info *ObjectInfo, js *jetStream) error { - // marshal the object into json, don't store an actual time - info.ModTime = time.Time{} - data, err := json.Marshal(info) - if err != nil { - return err - } - - // Prepare and publish the message. - mm := nats.NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name))) - mm.Header.Set(MsgRollup, MsgRollupSubject) - mm.Data = data - if _, err := js.PublishMsg(ctx, mm); err != nil { - return err - } - - // set the ModTime in case it's returned to the user, even though it's not the correct time. - info.ModTime = time.Now().UTC() - return nil -} - -// AddLink will add a link to another object if it's not deleted and not another link -// name is the name of this link object -// obj is what is being linked too -func (obs *obs) AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error) { - if name == "" { - return nil, ErrNameRequired - } - - // TODO Handle stale info - - if obj == nil || obj.Name == "" { - return nil, ErrObjectRequired - } - if obj.Deleted { - return nil, ErrNoLinkToDeleted - } - if obj.isLink() { - return nil, ErrNoLinkToLink - } - - // If object with link's name is found, error. - // If link with link's name is found, that's okay to overwrite. - // If there was an error that was not ErrObjectNotFound, error. 
- einfo, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted()) - if einfo != nil { - if !einfo.isLink() { - return nil, ErrObjectAlreadyExists - } - } else if err != ErrObjectNotFound { - return nil, err - } - - // create the meta for the link - meta := &ObjectMeta{ - Name: name, - Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}}, - } - info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta} - - // put the link object - if err = publishMeta(ctx, info, obs.js); err != nil { - return nil, err - } - - return info, nil -} - -// AddBucketLink will add a link to another object store. -func (ob *obs) AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error) { - if name == "" { - return nil, ErrNameRequired - } - if bucket == nil { - return nil, ErrBucketRequired - } - bos, ok := bucket.(*obs) - if !ok { - return nil, ErrBucketMalformed - } - - // If object with link's name is found, error. - // If link with link's name is found, that's okay to overwrite. - // If there was an error that was not ErrObjectNotFound, error. - einfo, err := ob.GetInfo(ctx, name, GetObjectInfoShowDeleted()) - if einfo != nil { - if !einfo.isLink() { - return nil, ErrObjectAlreadyExists - } - } else if err != ErrObjectNotFound { - return nil, err - } - - // create the meta for the link - meta := &ObjectMeta{ - Name: name, - Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}}, - } - info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta} - - // put the link object - err = publishMeta(ctx, info, ob.js) - if err != nil { - return nil, err - } - - return info, nil -} - -// PutBytes is convenience function to put a byte slice into this object store. 
-func (obs *obs) PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error) { - return obs.Put(ctx, ObjectMeta{Name: name}, bytes.NewReader(data)) -} - -// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. -func (obs *obs) GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error) { - result, err := obs.Get(ctx, name, opts...) - if err != nil { - return nil, err - } - defer result.Close() - - var b bytes.Buffer - if _, err := b.ReadFrom(result); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// PutString is convenience function to put a string into this object store. -func (obs *obs) PutString(ctx context.Context, name string, data string) (*ObjectInfo, error) { - return obs.Put(ctx, ObjectMeta{Name: name}, strings.NewReader(data)) -} - -// GetString is a convenience function to pull an object from this object store and return it as a string. -func (obs *obs) GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error) { - result, err := obs.Get(ctx, name, opts...) - if err != nil { - return "", err - } - defer result.Close() - - var b bytes.Buffer - if _, err := b.ReadFrom(result); err != nil { - return "", err - } - return b.String(), nil -} - -// PutFile is convenience function to put a file into an object store. -func (obs *obs) PutFile(ctx context.Context, file string) (*ObjectInfo, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - return obs.Put(ctx, ObjectMeta{Name: file}, f) -} - -// GetFile is a convenience function to pull and object and place in a file. -func (obs *obs) GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error { - // Expect file to be new. - f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600) - if err != nil { - return err - } - defer f.Close() - - result, err := obs.Get(ctx, name, opts...) 
- if err != nil { - os.Remove(f.Name()) - return err - } - defer result.Close() - - // Stream copy to the file. - _, err = io.Copy(f, result) - return err -} - -// GetInfo will retrieve the current information for the object. -func (obs *obs) GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) { - // Grab last meta value we have. - if name == "" { - return nil, ErrNameRequired - } - var o getObjectInfoOpts - for _, opt := range opts { - if opt != nil { - if err := opt(&o); err != nil { - return nil, err - } - } - } - - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call - - m, err := obs.stream.GetLastMsgForSubject(ctx, metaSubj) - if err != nil { - if errors.Is(err, ErrMsgNotFound) { - err = ErrObjectNotFound - } - if errors.Is(err, ErrStreamNotFound) { - err = ErrBucketNotFound - } - return nil, err - } - var info ObjectInfo - if err := json.Unmarshal(m.Data, &info); err != nil { - return nil, ErrBadObjectMeta - } - if !o.showDeleted && info.Deleted { - return nil, ErrObjectNotFound - } - info.ModTime = m.Time - return &info, nil -} - -// UpdateMeta will update the meta for the object. -func (obs *obs) UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error { - // Grab the current meta. - info, err := obs.GetInfo(ctx, name) - if err != nil { - if errors.Is(err, ErrObjectNotFound) { - return ErrUpdateMetaDeleted - } - return err - } - - // If the new name is different from the old, and it exists, error - // If there was an error that was not ErrObjectNotFound, error. - if name != meta.Name { - existingInfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted()) - if err != nil && !errors.Is(err, ErrObjectNotFound) { - return err - } - if err == nil && !existingInfo.Deleted { - return ErrObjectAlreadyExists - } - } - - // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize) - // These should only be updated internally when appropriate. 
- info.Name = meta.Name - info.Description = meta.Description - info.Headers = meta.Headers - info.Metadata = meta.Metadata - - // Prepare the meta message - if err = publishMeta(ctx, info, obs.js); err != nil { - return err - } - - // did the name of this object change? We just stored the meta under the new name - // so delete the meta from the old name via purge stream for subject - if name != meta.Name { - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) - return obs.stream.Purge(ctx, WithPurgeSubject(metaSubj)) - } - - return nil -} - -// Seal will seal the object store, no further modifications will be allowed. -func (obs *obs) Seal(ctx context.Context) error { - si, err := obs.stream.Info(ctx) - if err != nil { - return err - } - // Seal the stream from being able to take on more messages. - cfg := si.Config - cfg.Sealed = true - _, err = obs.js.UpdateStream(ctx, cfg) - return err -} - -// Implementation for Watch -type objWatcher struct { - updates chan *ObjectInfo - sub *nats.Subscription -} - -// Updates returns the interior channel. -func (w *objWatcher) Updates() <-chan *ObjectInfo { - if w == nil { - return nil - } - return w.updates -} - -// Stop will unsubscribe from the watcher. -func (w *objWatcher) Stop() error { - if w == nil { - return nil - } - return w.sub.Unsubscribe() -} - -// Watch for changes in the underlying store and receive meta information updates. -func (obs *obs) Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error) { - var o watchOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureWatcher(&o); err != nil { - return nil, err - } - } - } - - var initDoneMarker bool - - w := &objWatcher{updates: make(chan *ObjectInfo, 32)} - - update := func(m *nats.Msg) { - var info ObjectInfo - if err := json.Unmarshal(m.Data, &info); err != nil { - return // TODO(dlc) - Communicate this upwards? 
- } - meta, err := m.Metadata() - if err != nil { - return - } - - if !o.ignoreDeletes || !info.Deleted { - info.ModTime = meta.Timestamp - w.updates <- &info - } - - // if UpdatesOnly is set, no not send nil to the channel - // as it would always be triggered after initializing the watcher - if !initDoneMarker && meta.NumPending == 0 { - initDoneMarker = true - w.updates <- nil - } - } - - allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name) - _, err := obs.stream.GetLastMsgForSubject(ctx, allMeta) - // if there are no messages on the stream and we are not watching - // updates only, send nil to the channel to indicate that the initial - // watch is done - if !o.updatesOnly { - if errors.Is(err, ErrMsgNotFound) { - initDoneMarker = true - w.updates <- nil - } - } else { - // if UpdatesOnly was used, mark initialization as complete - initDoneMarker = true - } - - // Used ordered consumer to deliver results. - subOpts := []nats.SubOpt{nats.OrderedConsumer()} - if !o.includeHistory { - subOpts = append(subOpts, nats.DeliverLastPerSubject()) - } - if o.updatesOnly { - subOpts = append(subOpts, nats.DeliverNew()) - } - subOpts = append(subOpts, nats.Context(ctx)) - sub, err := obs.pushJS.Subscribe(allMeta, update, subOpts...) - if err != nil { - return nil, err - } - w.sub = sub - return w, nil -} - -// List will list all the objects in this store. -func (obs *obs) List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error) { - var o listObjectOpts - for _, opt := range opts { - if opt != nil { - if err := opt(&o); err != nil { - return nil, err - } - } - } - watchOpts := make([]WatchOpt, 0) - if !o.showDeleted { - watchOpts = append(watchOpts, IgnoreDeletes()) - } - watcher, err := obs.Watch(ctx, watchOpts...) 
- if err != nil { - return nil, err - } - defer watcher.Stop() - - var objs []*ObjectInfo - updates := watcher.Updates() -Updates: - for { - select { - case entry := <-updates: - if entry == nil { - break Updates - } - objs = append(objs, entry) - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if len(objs) == 0 { - return nil, ErrNoObjectsFound - } - return objs, nil -} - -// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus -type ObjectBucketStatus struct { - nfo *StreamInfo - bucket string -} - -// Bucket is the name of the bucket -func (s *ObjectBucketStatus) Bucket() string { return s.bucket } - -// Description is the description supplied when creating the bucket -func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description } - -// TTL indicates how long objects are kept in the bucket -func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } - -// Storage indicates the underlying JetStream storage technology used to store data -func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage } - -// Replicas indicates how many storage replicas are kept for the data in the bucket -func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas } - -// Sealed indicates the stream is sealed and cannot be modified in any way -func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed } - -// Size is the combined size of all data in the bucket including metadata, in bytes -func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes } - -// BackingStore indicates what technology is used for storage of the bucket -func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" } - -// Metadata is the metadata supplied when creating the bucket -func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata } - -// StreamInfo is the stream info retrieved to create the status -func (s *ObjectBucketStatus) 
StreamInfo() *StreamInfo { return s.nfo } - -// IsCompressed indicates if the data is compressed on disk -func (s *ObjectBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression } - -// Status retrieves run-time status about a bucket -func (obs *obs) Status(ctx context.Context) (ObjectStoreStatus, error) { - nfo, err := obs.stream.Info(ctx) - if err != nil { - return nil, err - } - - status := &ObjectBucketStatus{ - nfo: nfo, - bucket: obs.name, - } - - return status, nil -} - -// Read impl. -func (o *objResult) Read(p []byte) (n int, err error) { - o.Lock() - defer o.Unlock() - readDeadline := time.Now().Add(defaultAPITimeout) - if ctx := o.ctx; ctx != nil { - if deadline, ok := ctx.Deadline(); ok { - readDeadline = deadline - } - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - o.err = ctx.Err() - } else { - o.err = nats.ErrTimeout - } - default: - } - } - if o.err != nil { - return 0, o.err - } - if o.r == nil { - return 0, io.EOF - } - - r := o.r.(net.Conn) - _ = r.SetReadDeadline(readDeadline) - n, err = r.Read(p) - if err, ok := err.(net.Error); ok && err.Timeout() { - if ctx := o.ctx; ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - return 0, ctx.Err() - } else { - return 0, nats.ErrTimeout - } - default: - err = nil - } - } - } - if err == io.EOF { - // Make sure the digest matches. - sha := o.digest.Sum(nil) - rsha, decodeErr := DecodeObjectDigest(o.info.Digest) - if decodeErr != nil { - o.err = decodeErr - return 0, o.err - } - if !bytes.Equal(sha[:], rsha) { - o.err = ErrDigestMismatch - return 0, o.err - } - } - return n, err -} - -// Close impl. 
-func (o *objResult) Close() error { - o.Lock() - defer o.Unlock() - if o.r == nil { - return nil - } - return o.r.Close() -} - -func (o *objResult) setErr(err error) { - o.Lock() - defer o.Unlock() - o.err = err -} - -func (o *objResult) Info() (*ObjectInfo, error) { - o.Lock() - defer o.Unlock() - return o.info, o.err -} - -func (o *objResult) Error() error { - o.Lock() - defer o.Unlock() - return o.err -} - -// ObjectStoreNames is used to retrieve a list of bucket names -func (js *jetStream) ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister { - res := &obsLister{ - obsNames: make(chan string), - } - l := &streamLister{js: js} - streamsReq := streamsRequest{ - Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"), - } - - go func() { - defer close(res.obsNames) - for { - page, err := l.streamNames(ctx, streamsReq) - if err != nil && !errors.Is(err, ErrEndOfData) { - res.err = err - return - } - for _, name := range page { - if !strings.HasPrefix(name, "OBJ_") { - continue - } - res.obsNames <- strings.TrimPrefix(name, "OBJ_") - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - - return res -} - -// ObjectStores is used to retrieve a list of bucket statuses -func (js *jetStream) ObjectStores(ctx context.Context) ObjectStoresLister { - res := &obsLister{ - obs: make(chan ObjectStoreStatus), - } - l := &streamLister{js: js} - streamsReq := streamsRequest{ - Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"), - } - go func() { - defer close(res.obs) - for { - page, err := l.streamInfos(ctx, streamsReq) - if err != nil && !errors.Is(err, ErrEndOfData) { - res.err = err - return - } - for _, info := range page { - if !strings.HasPrefix(info.Config.Name, "OBJ_") { - continue - } - res.obs <- &ObjectBucketStatus{ - nfo: info, - bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), - } - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - - return res -} - -type obsLister struct { - obs chan ObjectStoreStatus - obsNames chan string - err error -} - 
-func (ol *obsLister) Status() <-chan ObjectStoreStatus { - return ol.obs -} - -func (ol *obsLister) Name() <-chan string { - return ol.obsNames -} - -func (ol *obsLister) Error() error { - return ol.err -} - -func mapStreamToObjectStore(js *jetStream, pushJS nats.JetStreamContext, bucket string, stream Stream) *obs { - info := stream.CachedInfo() - - obs := &obs{ - name: bucket, - js: js, - pushJS: pushJS, - streamName: info.Config.Name, - stream: stream, - } - - return obs -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object_options.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object_options.go deleted file mode 100644 index df58364..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/object_options.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -// GetObjectShowDeleted makes [ObjectStore.Get] return object even if it was -// marked as deleted. -func GetObjectShowDeleted() GetObjectOpt { - return func(opts *getObjectOpts) error { - opts.showDeleted = true - return nil - } -} - -// GetObjectInfoShowDeleted makes [ObjectStore.GetInfo] return object info event -// if it was marked as deleted. 
-func GetObjectInfoShowDeleted() GetObjectInfoOpt { - return func(opts *getObjectInfoOpts) error { - opts.showDeleted = true - return nil - } -} - -// ListObjectsShowDeleted makes [ObjectStore.ListObjects] also return deleted -// objects. -func ListObjectsShowDeleted() ListObjectsOpt { - return func(opts *listObjectOpts) error { - opts.showDeleted = true - return nil - } -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/ordered.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/ordered.go deleted file mode 100644 index fd7fe2f..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/ordered.go +++ /dev/null @@ -1,624 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jetstream - -import ( - "context" - "errors" - "fmt" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/nats-io/nats.go" -) - -type ( - orderedConsumer struct { - jetStream *jetStream - cfg *OrderedConsumerConfig - stream string - currentConsumer *pullConsumer - cursor cursor - namePrefix string - serial int - consumerType consumerType - doReset chan struct{} - resetInProgress uint32 - userErrHandler ConsumeErrHandlerFunc - stopAfter int - stopAfterMsgsLeft chan int - withStopAfter bool - runningFetch *fetchResult - sync.Mutex - } - - orderedSubscription struct { - consumer *orderedConsumer - opts []PullMessagesOpt - done chan struct{} - closed uint32 - } - - cursor struct { - streamSeq uint64 - deliverSeq uint64 - } - - consumerType int -) - -const ( - consumerTypeNotSet consumerType = iota - consumerTypeConsume - consumerTypeFetch -) - -var errOrderedSequenceMismatch = errors.New("sequence mismatch") - -// Consume can be used to continuously receive messages and handle them -// with the provided callback function. Consume cannot be used concurrently -// when using ordered consumer. -// -// See [Consumer.Consume] for more details. -func (c *orderedConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) { - if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil { - err := c.reset() - if err != nil { - return nil, err - } - } else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil { - return nil, ErrOrderedConsumerConcurrentRequests - } - if c.consumerType == consumerTypeFetch { - return nil, ErrOrderConsumerUsedAsFetch - } - c.consumerType = consumerTypeConsume - consumeOpts, err := parseConsumeOpts(true, opts...) 
- if err != nil { - return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) - } - c.userErrHandler = consumeOpts.ErrHandler - opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial))) - if consumeOpts.StopAfter > 0 { - c.withStopAfter = true - c.stopAfter = consumeOpts.StopAfter - } - c.stopAfterMsgsLeft = make(chan int, 1) - if c.stopAfter > 0 { - opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft)) - } - sub := &orderedSubscription{ - consumer: c, - done: make(chan struct{}, 1), - } - internalHandler := func(serial int) func(msg Msg) { - return func(msg Msg) { - // handler is a noop if message was delivered for a consumer with different serial - if serial != c.serial { - return - } - meta, err := msg.Metadata() - if err != nil { - sub, ok := c.currentConsumer.getSubscription("") - if !ok { - return - } - c.errHandler(serial)(sub, err) - return - } - dseq := meta.Sequence.Consumer - if dseq != c.cursor.deliverSeq+1 { - sub, ok := c.currentConsumer.getSubscription("") - if !ok { - return - } - c.errHandler(serial)(sub, errOrderedSequenceMismatch) - return - } - c.cursor.deliverSeq = dseq - c.cursor.streamSeq = meta.Sequence.Stream - handler(msg) - } - } - - _, err = c.currentConsumer.Consume(internalHandler(c.serial), opts...) 
- if err != nil { - return nil, err - } - - go func() { - for { - select { - case <-c.doReset: - if err := c.reset(); err != nil { - sub, ok := c.currentConsumer.getSubscription("") - if !ok { - return - } - c.errHandler(c.serial)(sub, err) - } - if c.withStopAfter { - select { - case c.stopAfter = <-c.stopAfterMsgsLeft: - default: - } - if c.stopAfter <= 0 { - sub.Stop() - return - } - } - if c.stopAfter > 0 { - opts = opts[:len(opts)-2] - } else { - opts = opts[:len(opts)-1] - } - - // overwrite the previous err handler to use the new serial - opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial))) - if c.withStopAfter { - opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft)) - } - if _, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...); err != nil { - sub, ok := c.currentConsumer.getSubscription("") - if !ok { - return - } - c.errHandler(c.serial)(sub, err) - } - case <-sub.done: - return - case msgsLeft, ok := <-c.stopAfterMsgsLeft: - if !ok { - close(sub.done) - } - c.stopAfter = msgsLeft - return - } - } - }() - return sub, nil -} - -func (c *orderedConsumer) errHandler(serial int) func(cc ConsumeContext, err error) { - return func(cc ConsumeContext, err error) { - c.Lock() - defer c.Unlock() - if c.userErrHandler != nil && !errors.Is(err, errOrderedSequenceMismatch) { - c.userErrHandler(cc, err) - } - if errors.Is(err, ErrNoHeartbeat) || - errors.Is(err, errOrderedSequenceMismatch) || - errors.Is(err, ErrConsumerDeleted) || - errors.Is(err, ErrConsumerNotFound) { - // only reset if serial matches the current consumer serial and there is no reset in progress - if serial == c.serial && atomic.LoadUint32(&c.resetInProgress) == 0 { - atomic.StoreUint32(&c.resetInProgress, 1) - c.doReset <- struct{}{} - } - } - } -} - -// Messages returns MessagesContext, allowing continuously iterating -// over messages on a stream. Messages cannot be used concurrently -// when using ordered consumer. 
-// -// See [Consumer.Messages] for more details. -func (c *orderedConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) { - if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil { - err := c.reset() - if err != nil { - return nil, err - } - } else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil { - return nil, ErrOrderedConsumerConcurrentRequests - } - if c.consumerType == consumerTypeFetch { - return nil, ErrOrderConsumerUsedAsFetch - } - c.consumerType = consumerTypeConsume - consumeOpts, err := parseMessagesOpts(true, opts...) - if err != nil { - return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) - } - opts = append(opts, WithMessagesErrOnMissingHeartbeat(true)) - c.stopAfterMsgsLeft = make(chan int, 1) - if consumeOpts.StopAfter > 0 { - c.withStopAfter = true - c.stopAfter = consumeOpts.StopAfter - } - c.userErrHandler = consumeOpts.ErrHandler - if c.stopAfter > 0 { - opts = append(opts, messagesStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft)) - } - _, err = c.currentConsumer.Messages(opts...) - if err != nil { - return nil, err - } - - sub := &orderedSubscription{ - consumer: c, - opts: opts, - done: make(chan struct{}, 1), - } - - return sub, nil -} - -func (s *orderedSubscription) Next() (Msg, error) { - for { - currentConsumer := s.consumer.currentConsumer - sub, ok := currentConsumer.getSubscription("") - if !ok { - return nil, ErrMsgIteratorClosed - } - msg, err := sub.Next() - if err != nil { - if errors.Is(err, ErrMsgIteratorClosed) { - s.Stop() - return nil, err - } - if s.consumer.withStopAfter { - select { - case s.consumer.stopAfter = <-s.consumer.stopAfterMsgsLeft: - default: - } - if s.consumer.stopAfter <= 0 { - s.Stop() - return nil, ErrMsgIteratorClosed - } - s.opts[len(s.opts)-1] = StopAfter(s.consumer.stopAfter) - } - if err := s.consumer.reset(); err != nil { - return nil, err - } - _, err := s.consumer.currentConsumer.Messages(s.opts...) 
- if err != nil { - return nil, err - } - continue - } - meta, err := msg.Metadata() - if err != nil { - s.consumer.errHandler(s.consumer.serial)(sub, err) - continue - } - serial := serialNumberFromConsumer(meta.Consumer) - dseq := meta.Sequence.Consumer - if dseq != s.consumer.cursor.deliverSeq+1 { - s.consumer.errHandler(serial)(sub, errOrderedSequenceMismatch) - continue - } - s.consumer.cursor.deliverSeq = dseq - s.consumer.cursor.streamSeq = meta.Sequence.Stream - return msg, nil - } -} - -func (s *orderedSubscription) Stop() { - if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) { - return - } - sub, ok := s.consumer.currentConsumer.getSubscription("") - if !ok { - return - } - s.consumer.currentConsumer.Lock() - defer s.consumer.currentConsumer.Unlock() - sub.Stop() - close(s.done) -} - -func (s *orderedSubscription) Drain() { - if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) { - return - } - sub, ok := s.consumer.currentConsumer.getSubscription("") - if !ok { - return - } - s.consumer.currentConsumer.Lock() - defer s.consumer.currentConsumer.Unlock() - sub.Drain() - close(s.done) -} - -// Fetch is used to retrieve up to a provided number of messages from a -// stream. This method will always send a single request and wait until -// either all messages are retrieved or request times out. -// -// It is not efficient to use Fetch with on an ordered consumer, as it will -// reset the consumer for each subsequent Fetch call. -// Consider using [Consumer.Consume] or [Consumer.Messages] instead. 
-func (c *orderedConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) { - if c.consumerType == consumerTypeConsume { - return nil, ErrOrderConsumerUsedAsConsume - } - c.currentConsumer.Lock() - if c.runningFetch != nil { - if !c.runningFetch.done { - c.currentConsumer.Unlock() - return nil, ErrOrderedConsumerConcurrentRequests - } - c.cursor.streamSeq = c.runningFetch.sseq - } - c.currentConsumer.Unlock() - c.consumerType = consumerTypeFetch - err := c.reset() - if err != nil { - return nil, err - } - msgs, err := c.currentConsumer.Fetch(batch, opts...) - if err != nil { - return nil, err - } - c.runningFetch = msgs.(*fetchResult) - return msgs, nil -} - -// FetchBytes is used to retrieve up to a provided bytes from the -// stream. This method will always send a single request and wait until -// provided number of bytes is exceeded or request times out. -// -// It is not efficient to use FetchBytes with on an ordered consumer, as it will -// reset the consumer for each subsequent Fetch call. -// Consider using [Consumer.Consume] or [Consumer.Messages] instead. -func (c *orderedConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) { - if c.consumerType == consumerTypeConsume { - return nil, ErrOrderConsumerUsedAsConsume - } - if c.runningFetch != nil { - if !c.runningFetch.done { - return nil, ErrOrderedConsumerConcurrentRequests - } - c.cursor.streamSeq = c.runningFetch.sseq - } - c.consumerType = consumerTypeFetch - err := c.reset() - if err != nil { - return nil, err - } - msgs, err := c.currentConsumer.FetchBytes(maxBytes, opts...) - if err != nil { - return nil, err - } - c.runningFetch = msgs.(*fetchResult) - return msgs, nil -} - -// FetchNoWait is used to retrieve up to a provided number of messages -// from a stream. This method will always send a single request and -// immediately return up to a provided number of messages or wait until -// at least one message is available or request times out. 
-// -// It is not efficient to use FetchNoWait with on an ordered consumer, as it will -// reset the consumer for each subsequent Fetch call. -// Consider using [Consumer.Consume] or [Consumer.Messages] instead. -func (c *orderedConsumer) FetchNoWait(batch int) (MessageBatch, error) { - if c.consumerType == consumerTypeConsume { - return nil, ErrOrderConsumerUsedAsConsume - } - if c.runningFetch != nil && !c.runningFetch.done { - return nil, ErrOrderedConsumerConcurrentRequests - } - c.consumerType = consumerTypeFetch - err := c.reset() - if err != nil { - return nil, err - } - return c.currentConsumer.FetchNoWait(batch) -} - -// Next is used to retrieve the next message from the stream. This -// method will block until the message is retrieved or timeout is -// reached. -// -// It is not efficient to use Next with on an ordered consumer, as it will -// reset the consumer for each subsequent Fetch call. -// Consider using [Consumer.Consume] or [Consumer.Messages] instead. -func (c *orderedConsumer) Next(opts ...FetchOpt) (Msg, error) { - res, err := c.Fetch(1, opts...) 
- if err != nil { - return nil, err - } - msg := <-res.Messages() - if msg != nil { - return msg, nil - } - if res.Error() == nil { - return nil, nats.ErrTimeout - } - return nil, res.Error() -} - -func serialNumberFromConsumer(name string) int { - if len(name) == 0 { - return 0 - } - serial, err := strconv.Atoi(name[len(name)-1:]) - if err != nil { - return 0 - } - return serial -} - -func (c *orderedConsumer) reset() error { - c.Lock() - defer c.Unlock() - defer atomic.StoreUint32(&c.resetInProgress, 0) - if c.currentConsumer != nil { - sub, ok := c.currentConsumer.getSubscription("") - c.currentConsumer.Lock() - if ok { - sub.Stop() - } - consName := c.currentConsumer.CachedInfo().Name - c.currentConsumer.Unlock() - var err error - for i := 0; ; i++ { - if c.cfg.MaxResetAttempts > 0 && i == c.cfg.MaxResetAttempts { - return fmt.Errorf("%w: maximum number of delete attempts reached: %s", ErrOrderedConsumerReset, err) - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - err = c.jetStream.DeleteConsumer(ctx, c.stream, consName) - cancel() - if err != nil { - if errors.Is(err, ErrConsumerNotFound) { - break - } - if errors.Is(err, nats.ErrTimeout) || errors.Is(err, context.DeadlineExceeded) { - continue - } - return err - } - break - } - } - seq := c.cursor.streamSeq + 1 - c.cursor.deliverSeq = 0 - consumerConfig := c.getConsumerConfigForSeq(seq) - - var err error - var cons Consumer - for i := 0; ; i++ { - if c.cfg.MaxResetAttempts > 0 && i == c.cfg.MaxResetAttempts { - return fmt.Errorf("%w: maximum number of create consumer attempts reached: %s", ErrOrderedConsumerReset, err) - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - cons, err = c.jetStream.CreateOrUpdateConsumer(ctx, c.stream, *consumerConfig) - if err != nil { - if errors.Is(err, ErrConsumerNotFound) { - cancel() - break - } - if errors.Is(err, nats.ErrTimeout) || errors.Is(err, context.DeadlineExceeded) { - cancel() - continue - } - cancel() - 
return err - } - cancel() - break - } - c.currentConsumer = cons.(*pullConsumer) - return nil -} - -func (c *orderedConsumer) getConsumerConfigForSeq(seq uint64) *ConsumerConfig { - c.serial++ - name := fmt.Sprintf("%s_%d", c.namePrefix, c.serial) - cfg := &ConsumerConfig{ - Name: name, - DeliverPolicy: DeliverByStartSequencePolicy, - OptStartSeq: seq, - AckPolicy: AckNonePolicy, - InactiveThreshold: 5 * time.Minute, - Replicas: 1, - HeadersOnly: c.cfg.HeadersOnly, - MemoryStorage: true, - } - if len(c.cfg.FilterSubjects) == 1 { - cfg.FilterSubject = c.cfg.FilterSubjects[0] - } else { - cfg.FilterSubjects = c.cfg.FilterSubjects - } - - if seq != c.cfg.OptStartSeq+1 { - return cfg - } - - // initial request, some options may be modified at that point - cfg.DeliverPolicy = c.cfg.DeliverPolicy - if c.cfg.DeliverPolicy == DeliverLastPerSubjectPolicy || - c.cfg.DeliverPolicy == DeliverLastPolicy || - c.cfg.DeliverPolicy == DeliverNewPolicy || - c.cfg.DeliverPolicy == DeliverAllPolicy { - - cfg.OptStartSeq = 0 - } - - if cfg.DeliverPolicy == DeliverLastPerSubjectPolicy && len(c.cfg.FilterSubjects) == 0 { - cfg.FilterSubjects = []string{">"} - } - if c.cfg.OptStartTime != nil { - cfg.OptStartSeq = 0 - cfg.DeliverPolicy = DeliverByStartTimePolicy - cfg.OptStartTime = c.cfg.OptStartTime - } - if c.cfg.InactiveThreshold != 0 { - cfg.InactiveThreshold = c.cfg.InactiveThreshold - } - - return cfg -} - -func consumeStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullConsumeOpt { - return pullOptFunc(func(opts *consumeOpts) error { - opts.StopAfter = numMsgs - opts.stopAfterMsgsLeft = msgsLeftAfterStop - return nil - }) -} - -func messagesStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullMessagesOpt { - return pullOptFunc(func(opts *consumeOpts) error { - opts.StopAfter = numMsgs - opts.stopAfterMsgsLeft = msgsLeftAfterStop - return nil - }) -} - -// Info returns information about the ordered consumer. 
-// Note that this method will fetch the latest instance of the -// consumer from the server, which can be deleted by the library at any time. -func (c *orderedConsumer) Info(ctx context.Context) (*ConsumerInfo, error) { - c.Lock() - defer c.Unlock() - if c.currentConsumer == nil { - return nil, ErrOrderedConsumerNotCreated - } - infoSubject := apiSubj(c.jetStream.apiPrefix, fmt.Sprintf(apiConsumerInfoT, c.stream, c.currentConsumer.name)) - var resp consumerInfoResponse - - if _, err := c.jetStream.apiRequestJSON(ctx, infoSubject, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { - return nil, ErrConsumerNotFound - } - return nil, resp.Error - } - if resp.Error == nil && resp.ConsumerInfo == nil { - return nil, ErrConsumerNotFound - } - - c.currentConsumer.info = resp.ConsumerInfo - return resp.ConsumerInfo, nil -} - -// CachedInfo returns cached information about the consumer currently -// used by the ordered consumer. Cached info will be updated on every call -// to [Consumer.Info] or on consumer reset. -func (c *orderedConsumer) CachedInfo() *ConsumerInfo { - c.Lock() - defer c.Unlock() - if c.currentConsumer == nil { - return nil - } - return c.currentConsumer.info -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/publish.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/publish.go deleted file mode 100644 index f41b06f..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/publish.go +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright 2022-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "context" - "crypto/sha256" - "encoding/json" - "errors" - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nuid" -) - -type ( - asyncPublisherOpts struct { - // For async publish error handling. - aecb MsgErrHandler - // Max async pub ack in flight - maxpa int - } - - // PublishOpt are the options that can be passed to Publish methods. - PublishOpt func(*pubOpts) error - - pubOpts struct { - id string - lastMsgID string // Expected last msgId - stream string // Expected stream name - lastSeq *uint64 // Expected last sequence - lastSubjectSeq *uint64 // Expected last sequence per subject - - // Publish retries for NoResponders err. - retryWait time.Duration // Retry wait between attempts - retryAttempts int // Retry attempts - - // stallWait is the max wait of a async pub ack. - stallWait time.Duration - - // internal option to re-use existing paf in case of retry. - pafRetry *pubAckFuture - } - - // PubAckFuture is a future for a PubAck. - // It can be used to wait for a PubAck or an error after an async publish. - PubAckFuture interface { - // Ok returns a receive only channel that can be used to get a PubAck. - Ok() <-chan *PubAck - - // Err returns a receive only channel that can be used to get the error from an async publish. - Err() <-chan error - - // Msg returns the message that was sent to the server. 
- Msg() *nats.Msg - } - - pubAckFuture struct { - jsClient *jetStreamClient - msg *nats.Msg - retries int - maxRetries int - retryWait time.Duration - ack *PubAck - err error - errCh chan error - doneCh chan *PubAck - } - - jetStreamClient struct { - asyncPublishContext - asyncPublisherOpts - } - - // MsgErrHandler is used to process asynchronous errors from JetStream - // PublishAsync. It will return the original message sent to the server for - // possible retransmitting and the error encountered. - MsgErrHandler func(JetStream, *nats.Msg, error) - - asyncPublishContext struct { - sync.RWMutex - replyPrefix string - replySub *nats.Subscription - acks map[string]*pubAckFuture - stallCh chan struct{} - doneCh chan struct{} - rr *rand.Rand - // channel to signal when server is disconnected or conn is closed - connStatusCh chan (nats.Status) - } - - pubAckResponse struct { - apiResponse - *PubAck - } - - // PubAck is an ack received after successfully publishing a message. - PubAck struct { - // Stream is the stream name the message was published to. - Stream string `json:"stream"` - - // Sequence is the stream sequence number of the message. - Sequence uint64 `json:"seq"` - - // Duplicate indicates whether the message was a duplicate. - // Duplicate can be detected using the [MsgIDHeader] and [StreamConfig.Duplicates]. - Duplicate bool `json:"duplicate,omitempty"` - - // Domain is the domain the message was published to. - Domain string `json:"domain,omitempty"` - } -) - -const ( - // Default time wait between retries on Publish if err is ErrNoResponders. - DefaultPubRetryWait = 250 * time.Millisecond - - // Default number of retries - DefaultPubRetryAttempts = 2 -) - -const ( - statusHdr = "Status" - - rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - base = 62 -) - -// Publish performs a synchronous publish to a stream and waits for ack -// from server. It accepts subject name (which must be bound to a stream) -// and message payload. 
-func (js *jetStream) Publish(ctx context.Context, subj string, data []byte, opts ...PublishOpt) (*PubAck, error) { - return js.PublishMsg(ctx, &nats.Msg{Subject: subj, Data: data}, opts...) -} - -// PublishMsg performs a synchronous publish to a stream and waits for -// ack from server. It accepts subject name (which must be bound to a -// stream) and nats.Message. -func (js *jetStream) PublishMsg(ctx context.Context, m *nats.Msg, opts ...PublishOpt) (*PubAck, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - o := pubOpts{ - retryWait: DefaultPubRetryWait, - retryAttempts: DefaultPubRetryAttempts, - } - if len(opts) > 0 { - if m.Header == nil { - m.Header = nats.Header{} - } - for _, opt := range opts { - if err := opt(&o); err != nil { - return nil, err - } - } - } - if o.stallWait > 0 { - return nil, fmt.Errorf("%w: stall wait cannot be set to sync publish", ErrInvalidOption) - } - - if o.id != "" { - m.Header.Set(MsgIDHeader, o.id) - } - if o.lastMsgID != "" { - m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID) - } - if o.stream != "" { - m.Header.Set(ExpectedStreamHeader, o.stream) - } - if o.lastSeq != nil { - m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10)) - } - if o.lastSubjectSeq != nil { - m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10)) - } - - var resp *nats.Msg - var err error - - resp, err = js.conn.RequestMsgWithContext(ctx, m) - - if err != nil { - for r := 0; errors.Is(err, nats.ErrNoResponders) && (r < o.retryAttempts || o.retryAttempts < 0); r++ { - // To protect against small blips in leadership changes etc, if we get a no responders here retry. 
- select { - case <-ctx.Done(): - case <-time.After(o.retryWait): - } - resp, err = js.conn.RequestMsgWithContext(ctx, m) - } - if err != nil { - if errors.Is(err, nats.ErrNoResponders) { - return nil, ErrNoStreamResponse - } - return nil, err - } - } - - var ackResp pubAckResponse - if err := json.Unmarshal(resp.Data, &ackResp); err != nil { - return nil, ErrInvalidJSAck - } - if ackResp.Error != nil { - return nil, fmt.Errorf("nats: %w", ackResp.Error) - } - if ackResp.PubAck == nil || ackResp.PubAck.Stream == "" { - return nil, ErrInvalidJSAck - } - return ackResp.PubAck, nil -} - -// PublishAsync performs an asynchronous publish to a stream and returns -// [PubAckFuture] interface. It accepts subject name (which must be bound -// to a stream) and message payload. -func (js *jetStream) PublishAsync(subj string, data []byte, opts ...PublishOpt) (PubAckFuture, error) { - return js.PublishMsgAsync(&nats.Msg{Subject: subj, Data: data}, opts...) -} - -// PublishMsgAsync performs an asynchronous publish to a stream and -// returns [PubAckFuture] interface. It accepts subject name (which must -// be bound to a stream) and nats.Message. 
-func (js *jetStream) PublishMsgAsync(m *nats.Msg, opts ...PublishOpt) (PubAckFuture, error) { - o := pubOpts{ - retryWait: DefaultPubRetryWait, - retryAttempts: DefaultPubRetryAttempts, - } - if len(opts) > 0 { - if m.Header == nil { - m.Header = nats.Header{} - } - for _, opt := range opts { - if err := opt(&o); err != nil { - return nil, err - } - } - } - defaultStallWait := 200 * time.Millisecond - - stallWait := defaultStallWait - if o.stallWait > 0 { - stallWait = o.stallWait - } - - if o.id != "" { - m.Header.Set(MsgIDHeader, o.id) - } - if o.lastMsgID != "" { - m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID) - } - if o.stream != "" { - m.Header.Set(ExpectedStreamHeader, o.stream) - } - if o.lastSeq != nil { - m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10)) - } - if o.lastSubjectSeq != nil { - m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10)) - } - - paf := o.pafRetry - if paf == nil && m.Reply != "" { - return nil, ErrAsyncPublishReplySubjectSet - } - - var id string - - // register new paf if not retrying - if paf == nil { - var err error - m.Reply, err = js.newAsyncReply() - defer func() { m.Reply = "" }() - if err != nil { - return nil, fmt.Errorf("nats: error creating async reply handler: %s", err) - } - id = m.Reply[js.replyPrefixLen:] - paf = &pubAckFuture{msg: m, jsClient: js.publisher, maxRetries: o.retryAttempts, retryWait: o.retryWait} - numPending, maxPending := js.registerPAF(id, paf) - - if maxPending > 0 && numPending > maxPending { - select { - case <-js.asyncStall(): - case <-time.After(stallWait): - js.clearPAF(id) - return nil, ErrTooManyStalledMsgs - } - } - } else { - // when retrying, get the ID from existing reply subject - id = m.Reply[js.replyPrefixLen:] - } - - if err := js.conn.PublishMsg(m); err != nil { - js.clearPAF(id) - return nil, err - } - - return paf, nil -} - -// For quick token lookup etc. 
-const ( - aReplyTokensize = 6 -) - -func (js *jetStream) newAsyncReply() (string, error) { - js.publisher.Lock() - if js.publisher.replySub == nil { - // Create our wildcard reply subject. - sha := sha256.New() - sha.Write([]byte(nuid.Next())) - b := sha.Sum(nil) - for i := 0; i < aReplyTokensize; i++ { - b[i] = rdigits[int(b[i]%base)] - } - js.publisher.replyPrefix = fmt.Sprintf("%s%s.", js.replyPrefix, b[:aReplyTokensize]) - sub, err := js.conn.Subscribe(fmt.Sprintf("%s*", js.publisher.replyPrefix), js.handleAsyncReply) - if err != nil { - js.publisher.Unlock() - return "", err - } - js.publisher.replySub = sub - js.publisher.rr = rand.New(rand.NewSource(time.Now().UnixNano())) - } - if js.publisher.connStatusCh == nil { - js.publisher.connStatusCh = js.conn.StatusChanged(nats.RECONNECTING, nats.CLOSED) - go js.resetPendingAcksOnReconnect() - } - var sb strings.Builder - sb.WriteString(js.publisher.replyPrefix) - rn := js.publisher.rr.Int63() - var b [aReplyTokensize]byte - for i, l := 0, rn; i < len(b); i++ { - b[i] = rdigits[l%base] - l /= base - } - sb.Write(b[:]) - js.publisher.Unlock() - return sb.String(), nil -} - -// Handle an async reply from PublishAsync. -func (js *jetStream) handleAsyncReply(m *nats.Msg) { - if len(m.Subject) <= js.replyPrefixLen { - return - } - id := m.Subject[js.replyPrefixLen:] - - js.publisher.Lock() - - paf := js.getPAF(id) - if paf == nil { - js.publisher.Unlock() - return - } - - doErr := func(err error) { - paf.err = err - if paf.errCh != nil { - paf.errCh <- paf.err - } - cb := js.publisher.asyncPublisherOpts.aecb - js.publisher.Unlock() - if cb != nil { - cb(js, paf.msg, err) - } - } - - // Process no responders etc. 
- if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { - if paf.retries < paf.maxRetries { - paf.retries++ - paf.msg.Reply = m.Subject - time.AfterFunc(paf.retryWait, func() { - _, err := js.PublishMsgAsync(paf.msg, func(po *pubOpts) error { - po.pafRetry = paf - return nil - }) - if err != nil { - js.publisher.Lock() - doErr(err) - } - }) - js.publisher.Unlock() - return - } - delete(js.publisher.acks, id) - doErr(ErrNoStreamResponse) - return - } - - // Remove - delete(js.publisher.acks, id) - - // Check on anyone stalled and waiting. - if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.asyncPublisherOpts.maxpa { - close(js.publisher.stallCh) - js.publisher.stallCh = nil - } - // Check on anyone waiting on done status. - if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 { - dch := js.publisher.doneCh - js.publisher.doneCh = nil - // Defer here so error is processed and can be checked. - defer close(dch) - } - - var pa pubAckResponse - if err := json.Unmarshal(m.Data, &pa); err != nil { - doErr(ErrInvalidJSAck) - return - } - if pa.Error != nil { - doErr(pa.Error) - return - } - if pa.PubAck == nil || pa.PubAck.Stream == "" { - doErr(ErrInvalidJSAck) - return - } - - // So here we have received a proper puback. - paf.ack = pa.PubAck - if paf.doneCh != nil { - paf.doneCh <- paf.ack - } - js.publisher.Unlock() -} - -func (js *jetStream) resetPendingAcksOnReconnect() { - js.publisher.Lock() - connStatusCh := js.publisher.connStatusCh - js.publisher.Unlock() - for { - newStatus, ok := <-connStatusCh - if !ok || newStatus == nats.CLOSED { - return - } - js.publisher.Lock() - for _, paf := range js.publisher.acks { - paf.err = nats.ErrDisconnected - } - js.publisher.acks = nil - if js.publisher.doneCh != nil { - close(js.publisher.doneCh) - js.publisher.doneCh = nil - } - js.publisher.Unlock() - } -} - -// registerPAF will register for a PubAckFuture. 
-func (js *jetStream) registerPAF(id string, paf *pubAckFuture) (int, int) { - js.publisher.Lock() - if js.publisher.acks == nil { - js.publisher.acks = make(map[string]*pubAckFuture) - } - js.publisher.acks[id] = paf - np := len(js.publisher.acks) - maxpa := js.publisher.asyncPublisherOpts.maxpa - js.publisher.Unlock() - return np, maxpa -} - -// Lock should be held. -func (js *jetStream) getPAF(id string) *pubAckFuture { - if js.publisher.acks == nil { - return nil - } - return js.publisher.acks[id] -} - -// clearPAF will remove a PubAckFuture that was registered. -func (js *jetStream) clearPAF(id string) { - js.publisher.Lock() - delete(js.publisher.acks, id) - js.publisher.Unlock() -} - -func (js *jetStream) asyncStall() <-chan struct{} { - js.publisher.Lock() - if js.publisher.stallCh == nil { - js.publisher.stallCh = make(chan struct{}) - } - stc := js.publisher.stallCh - js.publisher.Unlock() - return stc -} - -func (paf *pubAckFuture) Ok() <-chan *PubAck { - paf.jsClient.Lock() - defer paf.jsClient.Unlock() - - if paf.doneCh == nil { - paf.doneCh = make(chan *PubAck, 1) - if paf.ack != nil { - paf.doneCh <- paf.ack - } - } - - return paf.doneCh -} - -func (paf *pubAckFuture) Err() <-chan error { - paf.jsClient.Lock() - defer paf.jsClient.Unlock() - - if paf.errCh == nil { - paf.errCh = make(chan error, 1) - if paf.err != nil { - paf.errCh <- paf.err - } - } - - return paf.errCh -} - -func (paf *pubAckFuture) Msg() *nats.Msg { - paf.jsClient.RLock() - defer paf.jsClient.RUnlock() - return paf.msg -} - -// PublishAsyncPending returns the number of async publishes outstanding -// for this context. -func (js *jetStream) PublishAsyncPending() int { - js.publisher.RLock() - defer js.publisher.RUnlock() - return len(js.publisher.acks) -} - -// PublishAsyncComplete returns a channel that will be closed when all -// outstanding asynchronously published messages are acknowledged by the -// server. 
-func (js *jetStream) PublishAsyncComplete() <-chan struct{} { - js.publisher.Lock() - defer js.publisher.Unlock() - if js.publisher.doneCh == nil { - js.publisher.doneCh = make(chan struct{}) - } - dch := js.publisher.doneCh - if len(js.publisher.acks) == 0 { - close(js.publisher.doneCh) - js.publisher.doneCh = nil - } - return dch -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/pull.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/pull.go deleted file mode 100644 index bb5479a..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/pull.go +++ /dev/null @@ -1,1154 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nuid" -) - -type ( - // MessagesContext supports iterating over a messages on a stream. - // It is returned by [Consumer.Messages] method. - MessagesContext interface { - // Next retrieves next message on a stream. It will block until the next - // message is available. If the context is cancelled, Next will return - // ErrMsgIteratorClosed error. - Next() (Msg, error) - - // Stop unsubscribes from the stream and cancels subscription. Calling - // Next after calling Stop will return ErrMsgIteratorClosed error. 
- // All messages that are already in the buffer are discarded. - Stop() - - // Drain unsubscribes from the stream and cancels subscription. All - // messages that are already in the buffer will be available on - // subsequent calls to Next. After the buffer is drained, Next will - // return ErrMsgIteratorClosed error. - Drain() - } - - // ConsumeContext supports processing incoming messages from a stream. - // It is returned by [Consumer.Consume] method. - ConsumeContext interface { - // Stop unsubscribes from the stream and cancels subscription. - // No more messages will be received after calling this method. - // All messages that are already in the buffer are discarded. - Stop() - - // Drain unsubscribes from the stream and cancels subscription. - // All messages that are already in the buffer will be processed in callback function. - Drain() - } - - // MessageHandler is a handler function used as callback in [Consume]. - MessageHandler func(msg Msg) - - // PullConsumeOpt represent additional options used in [Consume] for pull consumers. - PullConsumeOpt interface { - configureConsume(*consumeOpts) error - } - - // PullMessagesOpt represent additional options used in [Messages] for pull consumers. 
- PullMessagesOpt interface { - configureMessages(*consumeOpts) error - } - - pullConsumer struct { - sync.Mutex - jetStream *jetStream - stream string - durable bool - name string - info *ConsumerInfo - subscriptions map[string]*pullSubscription - } - - pullRequest struct { - Expires time.Duration `json:"expires,omitempty"` - Batch int `json:"batch,omitempty"` - MaxBytes int `json:"max_bytes,omitempty"` - NoWait bool `json:"no_wait,omitempty"` - Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` - } - - consumeOpts struct { - Expires time.Duration - MaxMessages int - MaxBytes int - Heartbeat time.Duration - ErrHandler ConsumeErrHandlerFunc - ReportMissingHeartbeats bool - ThresholdMessages int - ThresholdBytes int - StopAfter int - stopAfterMsgsLeft chan int - } - - ConsumeErrHandlerFunc func(consumeCtx ConsumeContext, err error) - - pullSubscription struct { - sync.Mutex - id string - consumer *pullConsumer - subscription *nats.Subscription - msgs chan *nats.Msg - errs chan error - pending pendingMsgs - hbMonitor *hbMonitor - fetchInProgress uint32 - closed uint32 - draining uint32 - done chan struct{} - connStatusChanged chan nats.Status - fetchNext chan *pullRequest - consumeOpts *consumeOpts - delivered int - } - - pendingMsgs struct { - msgCount int - byteCount int - } - - MessageBatch interface { - Messages() <-chan Msg - Error() error - } - - fetchResult struct { - msgs chan Msg - err error - done bool - sseq uint64 - } - - FetchOpt func(*pullRequest) error - - hbMonitor struct { - timer *time.Timer - sync.Mutex - } -) - -const ( - DefaultMaxMessages = 500 - DefaultExpires = 30 * time.Second - unset = -1 -) - -func min(x, y int) int { - if x < y { - return x - } - return y -} - -// Consume can be used to continuously receive messages and handle them -// with the provided callback function. Consume cannot be used concurrently -// when using ordered consumer. -// -// See [Consumer.Consume] for more details. 
-func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) { - if handler == nil { - return nil, ErrHandlerRequired - } - consumeOpts, err := parseConsumeOpts(false, opts...) - if err != nil { - return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) - } - p.Lock() - - subject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiRequestNextT, p.stream, p.name)) - - // for single consume, use empty string as id - // this is useful for ordered consumer, where only a single subscription is valid - var consumeID string - if len(p.subscriptions) > 0 { - consumeID = nuid.Next() - } - sub := &pullSubscription{ - id: consumeID, - consumer: p, - errs: make(chan error, 1), - done: make(chan struct{}, 1), - fetchNext: make(chan *pullRequest, 1), - consumeOpts: consumeOpts, - } - sub.connStatusChanged = p.jetStream.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING) - - sub.hbMonitor = sub.scheduleHeartbeatCheck(consumeOpts.Heartbeat) - - p.subscriptions[sub.id] = sub - p.Unlock() - - internalHandler := func(msg *nats.Msg) { - if sub.hbMonitor != nil { - sub.hbMonitor.Stop() - } - userMsg, msgErr := checkMsg(msg) - if !userMsg && msgErr == nil { - if sub.hbMonitor != nil { - sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat) - } - return - } - defer func() { - sub.Lock() - sub.checkPending() - if sub.hbMonitor != nil { - sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat) - } - sub.Unlock() - }() - if !userMsg { - // heartbeat message - if msgErr == nil { - return - } - - sub.Lock() - err := sub.handleStatusMsg(msg, msgErr) - sub.Unlock() - - if err != nil { - if atomic.LoadUint32(&sub.closed) == 1 { - return - } - if sub.consumeOpts.ErrHandler != nil { - sub.consumeOpts.ErrHandler(sub, err) - } - sub.Stop() - } - return - } - handler(p.jetStream.toJSMsg(msg)) - sub.Lock() - sub.decrementPendingMsgs(msg) - sub.incrementDeliveredMsgs() - sub.Unlock() - - if sub.consumeOpts.StopAfter > 0 && sub.consumeOpts.StopAfter == sub.delivered { - 
sub.Stop() - } - } - inbox := p.jetStream.conn.NewInbox() - sub.subscription, err = p.jetStream.conn.Subscribe(inbox, internalHandler) - if err != nil { - return nil, err - } - sub.subscription.SetClosedHandler(func(sid string) func(string) { - return func(subject string) { - p.Lock() - defer p.Unlock() - delete(p.subscriptions, sid) - atomic.CompareAndSwapUint32(&sub.draining, 1, 0) - } - }(sub.id)) - - sub.Lock() - // initial pull - sub.resetPendingMsgs() - batchSize := sub.consumeOpts.MaxMessages - if sub.consumeOpts.StopAfter > 0 { - batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered) - } - if err := sub.pull(&pullRequest{ - Expires: consumeOpts.Expires, - Batch: batchSize, - MaxBytes: consumeOpts.MaxBytes, - Heartbeat: consumeOpts.Heartbeat, - }, subject); err != nil { - sub.errs <- err - } - sub.Unlock() - - go func() { - isConnected := true - for { - if atomic.LoadUint32(&sub.closed) == 1 { - return - } - select { - case status, ok := <-sub.connStatusChanged: - if !ok { - continue - } - if status == nats.RECONNECTING { - if sub.hbMonitor != nil { - sub.hbMonitor.Stop() - } - isConnected = false - } - if status == nats.CONNECTED { - sub.Lock() - if !isConnected { - isConnected = true - // try fetching consumer info several times to make sure consumer is available after reconnect - backoffOpts := backoffOpts{ - attempts: 10, - initialInterval: 1 * time.Second, - disableInitialExecution: true, - factor: 2, - maxInterval: 10 * time.Second, - cancel: sub.done, - } - err = retryWithBackoff(func(attempt int) (bool, error) { - isClosed := atomic.LoadUint32(&sub.closed) == 1 - if isClosed { - return false, nil - } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - _, err := p.Info(ctx) - if err != nil { - if sub.consumeOpts.ErrHandler != nil { - err = fmt.Errorf("[%d] attempting to fetch consumer info after reconnect: %w", attempt, err) - if attempt == backoffOpts.attempts-1 { - err = errors.Join(err, 
fmt.Errorf("maximum retry attempts reached")) - } - sub.consumeOpts.ErrHandler(sub, err) - } - return true, err - } - return false, nil - }, backoffOpts) - if err != nil { - if sub.consumeOpts.ErrHandler != nil { - sub.consumeOpts.ErrHandler(sub, err) - } - sub.Unlock() - sub.cleanup() - return - } - - sub.fetchNext <- &pullRequest{ - Expires: sub.consumeOpts.Expires, - Batch: sub.consumeOpts.MaxMessages, - MaxBytes: sub.consumeOpts.MaxBytes, - Heartbeat: sub.consumeOpts.Heartbeat, - } - if sub.hbMonitor != nil { - sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat) - } - sub.resetPendingMsgs() - } - sub.Unlock() - } - case err := <-sub.errs: - sub.Lock() - if sub.consumeOpts.ErrHandler != nil { - sub.consumeOpts.ErrHandler(sub, err) - } - if errors.Is(err, ErrNoHeartbeat) { - batchSize := sub.consumeOpts.MaxMessages - if sub.consumeOpts.StopAfter > 0 { - batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered) - } - sub.fetchNext <- &pullRequest{ - Expires: sub.consumeOpts.Expires, - Batch: batchSize, - MaxBytes: sub.consumeOpts.MaxBytes, - Heartbeat: sub.consumeOpts.Heartbeat, - } - if sub.hbMonitor != nil { - sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat) - } - sub.resetPendingMsgs() - } - sub.Unlock() - case <-sub.done: - return - } - } - }() - - go sub.pullMessages(subject) - - return sub, nil -} - -// resetPendingMsgs resets pending message count and byte count -// to the values set in consumeOpts -// lock should be held before calling this method -func (s *pullSubscription) resetPendingMsgs() { - s.pending.msgCount = s.consumeOpts.MaxMessages - s.pending.byteCount = s.consumeOpts.MaxBytes -} - -// decrementPendingMsgs decrements pending message count and byte count -// lock should be held before calling this method -func (s *pullSubscription) decrementPendingMsgs(msg *nats.Msg) { - s.pending.msgCount-- - if s.consumeOpts.MaxBytes != 0 { - s.pending.byteCount -= msg.Size() - } -} - -// incrementDeliveredMsgs increments delivered message count 
-// lock should be held before calling this method -func (s *pullSubscription) incrementDeliveredMsgs() { - s.delivered++ -} - -// checkPending verifies whether there are enough messages in -// the buffer to trigger a new pull request. -// lock should be held before calling this method -func (s *pullSubscription) checkPending() { - if (s.pending.msgCount < s.consumeOpts.ThresholdMessages || - (s.pending.byteCount < s.consumeOpts.ThresholdBytes && s.consumeOpts.MaxBytes != 0)) && - atomic.LoadUint32(&s.fetchInProgress) == 0 { - - var batchSize, maxBytes int - if s.consumeOpts.MaxBytes == 0 { - // if using messages, calculate appropriate batch size - batchSize = s.consumeOpts.MaxMessages - s.pending.msgCount - } else { - // if using bytes, use the max value - batchSize = s.consumeOpts.MaxMessages - maxBytes = s.consumeOpts.MaxBytes - s.pending.byteCount - } - if s.consumeOpts.StopAfter > 0 { - batchSize = min(batchSize, s.consumeOpts.StopAfter-s.delivered-s.pending.msgCount) - } - if batchSize > 0 { - s.fetchNext <- &pullRequest{ - Expires: s.consumeOpts.Expires, - Batch: batchSize, - MaxBytes: maxBytes, - Heartbeat: s.consumeOpts.Heartbeat, - } - - s.pending.msgCount = s.consumeOpts.MaxMessages - s.pending.byteCount = s.consumeOpts.MaxBytes - } - } -} - -// Messages returns MessagesContext, allowing continuously iterating -// over messages on a stream. Messages cannot be used concurrently -// when using ordered consumer. -// -// See [Consumer.Messages] for more details. -func (p *pullConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) { - consumeOpts, err := parseMessagesOpts(false, opts...) 
- if err != nil { - return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) - } - - p.Lock() - subject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiRequestNextT, p.stream, p.name)) - - msgs := make(chan *nats.Msg, consumeOpts.MaxMessages) - - // for single consume, use empty string as id - // this is useful for ordered consumer, where only a single subscription is valid - var consumeID string - if len(p.subscriptions) > 0 { - consumeID = nuid.Next() - } - sub := &pullSubscription{ - id: consumeID, - consumer: p, - done: make(chan struct{}, 1), - msgs: msgs, - errs: make(chan error, 1), - fetchNext: make(chan *pullRequest, 1), - consumeOpts: consumeOpts, - } - sub.connStatusChanged = p.jetStream.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING) - inbox := p.jetStream.conn.NewInbox() - sub.subscription, err = p.jetStream.conn.ChanSubscribe(inbox, sub.msgs) - if err != nil { - p.Unlock() - return nil, err - } - sub.subscription.SetClosedHandler(func(sid string) func(string) { - return func(subject string) { - p.Lock() - defer p.Unlock() - if atomic.LoadUint32(&sub.draining) != 1 { - // if we're not draining, subscription can be closed as soon - // as closed handler is called - // otherwise, we need to wait until all messages are drained - // in Next - delete(p.subscriptions, sid) - } - close(msgs) - } - }(sub.id)) - - p.subscriptions[sub.id] = sub - p.Unlock() - - go sub.pullMessages(subject) - - go func() { - for { - select { - case status, ok := <-sub.connStatusChanged: - if !ok { - return - } - if status == nats.CONNECTED { - sub.errs <- errConnected - } - if status == nats.RECONNECTING { - sub.errs <- errDisconnected - } - case <-sub.done: - return - } - } - }() - - return sub, nil -} - -var ( - errConnected = errors.New("connected") - errDisconnected = errors.New("disconnected") -) - -// Next retrieves next message on a stream. It will block until the next -// message is available. 
If the context is cancelled, Next will return -// ErrMsgIteratorClosed error. -func (s *pullSubscription) Next() (Msg, error) { - s.Lock() - defer s.Unlock() - drainMode := atomic.LoadUint32(&s.draining) == 1 - closed := atomic.LoadUint32(&s.closed) == 1 - if closed && !drainMode { - return nil, ErrMsgIteratorClosed - } - hbMonitor := s.scheduleHeartbeatCheck(2 * s.consumeOpts.Heartbeat) - defer func() { - if hbMonitor != nil { - hbMonitor.Stop() - } - }() - - isConnected := true - if s.consumeOpts.StopAfter > 0 && s.delivered >= s.consumeOpts.StopAfter { - s.Stop() - return nil, ErrMsgIteratorClosed - } - - for { - s.checkPending() - select { - case msg, ok := <-s.msgs: - if !ok { - // if msgs channel is closed, it means that subscription was either drained or stopped - delete(s.consumer.subscriptions, s.id) - atomic.CompareAndSwapUint32(&s.draining, 1, 0) - return nil, ErrMsgIteratorClosed - } - if hbMonitor != nil { - hbMonitor.Reset(2 * s.consumeOpts.Heartbeat) - } - userMsg, msgErr := checkMsg(msg) - if !userMsg { - // heartbeat message - if msgErr == nil { - continue - } - if err := s.handleStatusMsg(msg, msgErr); err != nil { - s.Stop() - return nil, err - } - continue - } - s.decrementPendingMsgs(msg) - s.incrementDeliveredMsgs() - return s.consumer.jetStream.toJSMsg(msg), nil - case err := <-s.errs: - if errors.Is(err, ErrNoHeartbeat) { - s.pending.msgCount = 0 - s.pending.byteCount = 0 - if s.consumeOpts.ReportMissingHeartbeats { - return nil, err - } - if hbMonitor != nil { - hbMonitor.Reset(2 * s.consumeOpts.Heartbeat) - } - } - if errors.Is(err, errConnected) { - if !isConnected { - isConnected = true - // try fetching consumer info several times to make sure consumer is available after reconnect - backoffOpts := backoffOpts{ - attempts: 10, - initialInterval: 1 * time.Second, - disableInitialExecution: true, - factor: 2, - maxInterval: 10 * time.Second, - cancel: s.done, - } - err = retryWithBackoff(func(attempt int) (bool, error) { - isClosed := 
atomic.LoadUint32(&s.closed) == 1 - if isClosed { - return false, nil - } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - _, err := s.consumer.Info(ctx) - if err != nil { - if errors.Is(err, ErrConsumerNotFound) { - return false, err - } - if attempt == backoffOpts.attempts-1 { - return true, fmt.Errorf("could not get consumer info after server reconnect: %w", err) - } - return true, err - } - return false, nil - }, backoffOpts) - if err != nil { - s.Stop() - return nil, err - } - - s.pending.msgCount = 0 - s.pending.byteCount = 0 - if hbMonitor != nil { - hbMonitor.Reset(2 * s.consumeOpts.Heartbeat) - } - } - } - if errors.Is(err, errDisconnected) { - if hbMonitor != nil { - hbMonitor.Reset(2 * s.consumeOpts.Heartbeat) - } - isConnected = false - } - } - } -} - -func (s *pullSubscription) handleStatusMsg(msg *nats.Msg, msgErr error) error { - if !errors.Is(msgErr, nats.ErrTimeout) && !errors.Is(msgErr, ErrMaxBytesExceeded) { - if errors.Is(msgErr, ErrConsumerDeleted) || errors.Is(msgErr, ErrBadRequest) { - return msgErr - } - if s.consumeOpts.ErrHandler != nil { - s.consumeOpts.ErrHandler(s, msgErr) - } - if errors.Is(msgErr, ErrConsumerLeadershipChanged) { - s.pending.msgCount = 0 - s.pending.byteCount = 0 - } - return nil - } - msgsLeft, bytesLeft, err := parsePending(msg) - if err != nil { - return err - } - s.pending.msgCount -= msgsLeft - if s.pending.msgCount < 0 { - s.pending.msgCount = 0 - } - if s.consumeOpts.MaxBytes > 0 { - s.pending.byteCount -= bytesLeft - if s.pending.byteCount < 0 { - s.pending.byteCount = 0 - } - } - return nil -} - -func (hb *hbMonitor) Stop() { - hb.Mutex.Lock() - hb.timer.Stop() - hb.Mutex.Unlock() -} - -func (hb *hbMonitor) Reset(dur time.Duration) { - hb.Mutex.Lock() - hb.timer.Reset(dur) - hb.Mutex.Unlock() -} - -// Stop unsubscribes from the stream and cancels subscription. Calling -// Next after calling Stop will return ErrMsgIteratorClosed error. 
-// All messages that are already in the buffer are discarded. -func (s *pullSubscription) Stop() { - if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) { - return - } - close(s.done) - if s.consumeOpts.stopAfterMsgsLeft != nil { - if s.delivered >= s.consumeOpts.StopAfter { - close(s.consumeOpts.stopAfterMsgsLeft) - } else { - s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered - } - } -} - -// Drain unsubscribes from the stream and cancels subscription. All -// messages that are already in the buffer will be available on -// subsequent calls to Next. After the buffer is drained, Next will -// return ErrMsgIteratorClosed error. -func (s *pullSubscription) Drain() { - if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) { - return - } - atomic.StoreUint32(&s.draining, 1) - close(s.done) - if s.consumeOpts.stopAfterMsgsLeft != nil { - if s.delivered >= s.consumeOpts.StopAfter { - close(s.consumeOpts.stopAfterMsgsLeft) - } else { - s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered - } - } -} - -// Fetch sends a single request to retrieve given number of messages. -// It will wait up to provided expiry time if not all messages are available. -func (p *pullConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) { - req := &pullRequest{ - Batch: batch, - Expires: DefaultExpires, - Heartbeat: unset, - } - for _, opt := range opts { - if err := opt(req); err != nil { - return nil, err - } - } - // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls - // and disable it for shorter pulls - if req.Heartbeat == unset { - if req.Expires >= 10*time.Second { - req.Heartbeat = 5 * time.Second - } else { - req.Heartbeat = 0 - } - } - if req.Expires < 2*req.Heartbeat { - return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption) - } - - return p.fetch(req) -} - -// FetchBytes is used to retrieve up to a provided bytes from the stream. 
-func (p *pullConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) { - req := &pullRequest{ - Batch: 1000000, - MaxBytes: maxBytes, - Expires: DefaultExpires, - Heartbeat: unset, - } - for _, opt := range opts { - if err := opt(req); err != nil { - return nil, err - } - } - // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls - // and disable it for shorter pulls - if req.Heartbeat == unset { - if req.Expires >= 10*time.Second { - req.Heartbeat = 5 * time.Second - } else { - req.Heartbeat = 0 - } - } - if req.Expires < 2*req.Heartbeat { - return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption) - } - - return p.fetch(req) -} - -// FetchNoWait sends a single request to retrieve given number of messages. -// FetchNoWait will only return messages that are available at the time of the -// request. It will not wait for more messages to arrive. -func (p *pullConsumer) FetchNoWait(batch int) (MessageBatch, error) { - req := &pullRequest{ - Batch: batch, - NoWait: true, - } - - return p.fetch(req) -} - -func (p *pullConsumer) fetch(req *pullRequest) (MessageBatch, error) { - res := &fetchResult{ - msgs: make(chan Msg, req.Batch), - } - msgs := make(chan *nats.Msg, 2*req.Batch) - subject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiRequestNextT, p.stream, p.name)) - - sub := &pullSubscription{ - consumer: p, - done: make(chan struct{}, 1), - msgs: msgs, - errs: make(chan error, 1), - } - inbox := p.jetStream.conn.NewInbox() - var err error - sub.subscription, err = p.jetStream.conn.ChanSubscribe(inbox, sub.msgs) - if err != nil { - return nil, err - } - if err := sub.pull(req, subject); err != nil { - return nil, err - } - - var receivedMsgs, receivedBytes int - hbTimer := sub.scheduleHeartbeatCheck(req.Heartbeat) - go func(res *fetchResult) { - defer sub.subscription.Unsubscribe() - defer close(res.msgs) - for { - select { - case msg := <-msgs: - p.Lock() - if hbTimer != nil { - 
hbTimer.Reset(2 * req.Heartbeat) - } - userMsg, err := checkMsg(msg) - if err != nil { - errNotTimeoutOrNoMsgs := !errors.Is(err, nats.ErrTimeout) && !errors.Is(err, ErrNoMessages) - if errNotTimeoutOrNoMsgs && !errors.Is(err, ErrMaxBytesExceeded) { - res.err = err - } - res.done = true - p.Unlock() - return - } - if !userMsg { - p.Unlock() - continue - } - res.msgs <- p.jetStream.toJSMsg(msg) - meta, err := msg.Metadata() - if err != nil { - res.err = fmt.Errorf("parsing message metadata: %s", err) - } - res.sseq = meta.Sequence.Stream - receivedMsgs++ - if req.MaxBytes != 0 { - receivedBytes += msg.Size() - } - if receivedMsgs == req.Batch || (req.MaxBytes != 0 && receivedBytes >= req.MaxBytes) { - res.done = true - p.Unlock() - return - } - p.Unlock() - case err := <-sub.errs: - res.err = err - res.done = true - return - case <-time.After(req.Expires + 1*time.Second): - res.done = true - return - } - } - }(res) - return res, nil -} - -func (fr *fetchResult) Messages() <-chan Msg { - return fr.msgs -} - -func (fr *fetchResult) Error() error { - return fr.err -} - -// Next is used to retrieve the next message from the stream. This -// method will block until the message is retrieved or timeout is -// reached. -func (p *pullConsumer) Next(opts ...FetchOpt) (Msg, error) { - res, err := p.Fetch(1, opts...) 
- if err != nil { - return nil, err - } - msg := <-res.Messages() - if msg != nil { - return msg, nil - } - if res.Error() == nil { - return nil, nats.ErrTimeout - } - return nil, res.Error() -} - -func (s *pullSubscription) pullMessages(subject string) { - for { - select { - case req := <-s.fetchNext: - atomic.StoreUint32(&s.fetchInProgress, 1) - - if err := s.pull(req, subject); err != nil { - if errors.Is(err, ErrMsgIteratorClosed) { - s.cleanup() - return - } - s.errs <- err - } - atomic.StoreUint32(&s.fetchInProgress, 0) - case <-s.done: - s.cleanup() - return - } - } -} - -func (s *pullSubscription) scheduleHeartbeatCheck(dur time.Duration) *hbMonitor { - if dur == 0 { - return nil - } - return &hbMonitor{ - timer: time.AfterFunc(2*dur, func() { - s.errs <- ErrNoHeartbeat - }), - } -} - -func (s *pullSubscription) cleanup() { - // For now this function does not need to hold the lock. - // Holding the lock here might cause a deadlock if Next() - // is already holding the lock and waiting. - // The fields that are read (subscription, hbMonitor) - // are read only (Only written on creation of pullSubscription). - if s.subscription == nil || !s.subscription.IsValid() { - return - } - if s.hbMonitor != nil { - s.hbMonitor.Stop() - } - drainMode := atomic.LoadUint32(&s.draining) == 1 - if drainMode { - s.subscription.Drain() - } else { - s.subscription.Unsubscribe() - } - atomic.StoreUint32(&s.closed, 1) -} - -// pull sends a pull request to the server and waits for messages using a subscription from [pullSubscription]. 
-// Messages will be fetched up to given batch_size or until there are no more messages or timeout is returned -func (s *pullSubscription) pull(req *pullRequest, subject string) error { - s.consumer.Lock() - defer s.consumer.Unlock() - if atomic.LoadUint32(&s.closed) == 1 { - return ErrMsgIteratorClosed - } - if req.Batch < 1 { - return fmt.Errorf("%w: batch size must be at least 1", nats.ErrInvalidArg) - } - reqJSON, err := json.Marshal(req) - if err != nil { - return err - } - - reply := s.subscription.Subject - if err := s.consumer.jetStream.conn.PublishRequest(subject, reply, reqJSON); err != nil { - return err - } - return nil -} - -func parseConsumeOpts(ordered bool, opts ...PullConsumeOpt) (*consumeOpts, error) { - consumeOpts := &consumeOpts{ - MaxMessages: unset, - MaxBytes: unset, - Expires: DefaultExpires, - Heartbeat: unset, - ReportMissingHeartbeats: true, - StopAfter: unset, - } - for _, opt := range opts { - if err := opt.configureConsume(consumeOpts); err != nil { - return nil, err - } - } - if err := consumeOpts.setDefaults(ordered); err != nil { - return nil, err - } - return consumeOpts, nil -} - -func parseMessagesOpts(ordered bool, opts ...PullMessagesOpt) (*consumeOpts, error) { - consumeOpts := &consumeOpts{ - MaxMessages: unset, - MaxBytes: unset, - Expires: DefaultExpires, - Heartbeat: unset, - ReportMissingHeartbeats: true, - StopAfter: unset, - } - for _, opt := range opts { - if err := opt.configureMessages(consumeOpts); err != nil { - return nil, err - } - } - if err := consumeOpts.setDefaults(ordered); err != nil { - return nil, err - } - return consumeOpts, nil -} - -func (consumeOpts *consumeOpts) setDefaults(ordered bool) error { - if consumeOpts.MaxBytes != unset && consumeOpts.MaxMessages != unset { - return fmt.Errorf("only one of MaxMessages and MaxBytes can be specified") - } - if consumeOpts.MaxBytes != unset { - // when max_bytes is used, set batch size to a very large number - consumeOpts.MaxMessages = 1000000 - } else if 
consumeOpts.MaxMessages != unset { - consumeOpts.MaxBytes = 0 - } else { - if consumeOpts.MaxBytes == unset { - consumeOpts.MaxBytes = 0 - } - if consumeOpts.MaxMessages == unset { - consumeOpts.MaxMessages = DefaultMaxMessages - } - } - - if consumeOpts.ThresholdMessages == 0 { - consumeOpts.ThresholdMessages = int(math.Ceil(float64(consumeOpts.MaxMessages) / 2)) - } - if consumeOpts.ThresholdBytes == 0 { - consumeOpts.ThresholdBytes = int(math.Ceil(float64(consumeOpts.MaxBytes) / 2)) - } - if consumeOpts.Heartbeat == unset { - if ordered { - consumeOpts.Heartbeat = 5 * time.Second - if consumeOpts.Expires < 10*time.Second { - consumeOpts.Heartbeat = consumeOpts.Expires / 2 - } - } else { - consumeOpts.Heartbeat = consumeOpts.Expires / 2 - if consumeOpts.Heartbeat > 30*time.Second { - consumeOpts.Heartbeat = 30 * time.Second - } - } - } - if consumeOpts.Heartbeat > consumeOpts.Expires/2 { - return fmt.Errorf("the value of Heartbeat must be less than 50%% of expiry") - } - return nil -} - -type backoffOpts struct { - // total retry attempts - // -1 for unlimited - attempts int - // initial interval after which first retry will be performed - // defaults to 1s - initialInterval time.Duration - // determines whether first function execution should be performed immediately - disableInitialExecution bool - // multiplier on each attempt - // defaults to 2 - factor float64 - // max interval between retries - // after reaching this value, all subsequent - // retries will be performed with this interval - // defaults to 1 minute - maxInterval time.Duration - // custom backoff intervals - // if set, overrides all other options except attempts - // if attempts are set, then the last interval will be used - // for all subsequent retries after reaching the limit - customBackoff []time.Duration - // cancel channel - // if set, retry will be cancelled when this channel is closed - cancel <-chan struct{} -} - -func retryWithBackoff(f func(int) (bool, error), opts backoffOpts) 
error { - var err error - var shouldContinue bool - // if custom backoff is set, use it instead of other options - if len(opts.customBackoff) > 0 { - if opts.attempts != 0 { - return fmt.Errorf("cannot use custom backoff intervals when attempts are set") - } - for i, interval := range opts.customBackoff { - select { - case <-opts.cancel: - return nil - case <-time.After(interval): - } - shouldContinue, err = f(i) - if !shouldContinue { - return err - } - } - return err - } - - // set default options - if opts.initialInterval == 0 { - opts.initialInterval = 1 * time.Second - } - if opts.factor == 0 { - opts.factor = 2 - } - if opts.maxInterval == 0 { - opts.maxInterval = 1 * time.Minute - } - if opts.attempts == 0 { - return fmt.Errorf("retry attempts have to be set when not using custom backoff intervals") - } - interval := opts.initialInterval - for i := 0; ; i++ { - if i == 0 && opts.disableInitialExecution { - time.Sleep(interval) - continue - } - shouldContinue, err = f(i) - if !shouldContinue { - return err - } - if opts.attempts > 0 && i >= opts.attempts-1 { - break - } - select { - case <-opts.cancel: - return nil - case <-time.After(interval): - } - interval = time.Duration(float64(interval) * opts.factor) - if interval >= opts.maxInterval { - interval = opts.maxInterval - } - } - return err -} - -func (c *pullConsumer) getSubscription(id string) (*pullSubscription, bool) { - c.Lock() - defer c.Unlock() - sub, ok := c.subscriptions[id] - return sub, ok -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream.go deleted file mode 100644 index 01c9d58..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream.go +++ /dev/null @@ -1,719 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with 
the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strconv" - "time" - - "github.com/nats-io/nats.go" - "github.com/nats-io/nuid" -) - -type ( - // Stream contains CRUD methods on a consumer via [ConsumerManager], as well - // as operations on an existing stream. It allows fetching and removing - // messages from a stream, as well as purging a stream. - Stream interface { - ConsumerManager - - // Info returns StreamInfo from the server. - Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) - - // CachedInfo returns ConsumerInfo currently cached on this stream. - // This method does not perform any network requests. The cached - // StreamInfo is updated on every call to Info and Update. - CachedInfo() *StreamInfo - - // Purge removes messages from a stream. It is a destructive operation. - // Use with caution. See StreamPurgeOpt for available options. - Purge(ctx context.Context, opts ...StreamPurgeOpt) error - - // GetMsg retrieves a raw stream message stored in JetStream by sequence number. - GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) - - // GetLastMsgForSubject retrieves the last raw stream message stored in - // JetStream on a given subject subject. - GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) - - // DeleteMsg deletes a message from a stream. - // On the server, the message is marked as erased, but not overwritten. 
- DeleteMsg(ctx context.Context, seq uint64) error - - // SecureDeleteMsg deletes a message from a stream. The deleted message - // is overwritten with random data. As a result, this operation is slower - // than DeleteMsg. - SecureDeleteMsg(ctx context.Context, seq uint64) error - } - - // ConsumerManager provides CRUD API for managing consumers. It is - // available as a part of [Stream] interface. CreateConsumer, - // UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a - // [Consumer] interface, allowing to operate on a consumer (e.g. consume - // messages). - ConsumerManager interface { - // CreateOrUpdateConsumer creates a consumer on a given stream with - // given config. If consumer already exists, it will be updated (if - // possible). Consumer interface is returned, allowing to operate on a - // consumer (e.g. fetch messages). - CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) - - // CreateConsumer creates a consumer on a given stream with given - // config. If consumer already exists and the provided configuration - // differs from its configuration, ErrConsumerExists is returned. If the - // provided configuration is the same as the existing consumer, the - // existing consumer is returned. Consumer interface is returned, - // allowing to operate on a consumer (e.g. fetch messages). - CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) - - // UpdateConsumer updates an existing consumer. If consumer does not - // exist, ErrConsumerDoesNotExist is returned. Consumer interface is - // returned, allowing to operate on a consumer (e.g. fetch messages). - UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) - - // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer - // are managed by the library and provide a simple way to consume - // messages from a stream. 
Ordered consumers are ephemeral in-memory - // pull consumers and are resilient to deletes and restarts. - OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) - - // Consumer returns an interface to an existing consumer, allowing processing - // of messages. If consumer does not exist, ErrConsumerNotFound is - // returned. - Consumer(ctx context.Context, consumer string) (Consumer, error) - - // DeleteConsumer removes a consumer with given name from a stream. - // If consumer does not exist, ErrConsumerNotFound is returned. - DeleteConsumer(ctx context.Context, consumer string) error - - // ListConsumers returns ConsumerInfoLister enabling iterating over a - // channel of consumer infos. - ListConsumers(context.Context) ConsumerInfoLister - - // ConsumerNames returns a ConsumerNameLister enabling iterating over a - // channel of consumer names. - ConsumerNames(context.Context) ConsumerNameLister - } - - RawStreamMsg struct { - Subject string - Sequence uint64 - Header nats.Header - Data []byte - Time time.Time - } - - stream struct { - name string - info *StreamInfo - jetStream *jetStream - } - - // StreamInfoOpt is a function setting options for [Stream.Info] - StreamInfoOpt func(*streamInfoRequest) error - - streamInfoRequest struct { - apiPaged - DeletedDetails bool `json:"deleted_details,omitempty"` - SubjectFilter string `json:"subjects_filter,omitempty"` - } - - consumerInfoResponse struct { - apiResponse - *ConsumerInfo - } - - // StreamPurgeOpt is a function setting options for [Stream.Purge] - StreamPurgeOpt func(*StreamPurgeRequest) error - - // StreamPurgeRequest is an API request body to purge a stream. - - StreamPurgeRequest struct { - // Purge up to but not including sequence. - Sequence uint64 `json:"seq,omitempty"` - // Subject to match against messages for the purge command. - Subject string `json:"filter,omitempty"` - // Number of messages to keep. 
- Keep uint64 `json:"keep,omitempty"` - } - - streamPurgeResponse struct { - apiResponse - Success bool `json:"success,omitempty"` - Purged uint64 `json:"purged"` - } - - consumerDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` - } - - // GetMsgOpt is a function setting options for [Stream.GetMsg] - GetMsgOpt func(*apiMsgGetRequest) error - - apiMsgGetRequest struct { - Seq uint64 `json:"seq,omitempty"` - LastFor string `json:"last_by_subj,omitempty"` - NextFor string `json:"next_by_subj,omitempty"` - } - - // apiMsgGetResponse is the response for a Stream get request. - apiMsgGetResponse struct { - apiResponse - Message *storedMsg `json:"message,omitempty"` - } - - // storedMsg is a raw message stored in JetStream. - storedMsg struct { - Subject string `json:"subject"` - Sequence uint64 `json:"seq"` - Header []byte `json:"hdrs,omitempty"` - Data []byte `json:"data,omitempty"` - Time time.Time `json:"time"` - } - - msgDeleteRequest struct { - Seq uint64 `json:"seq"` - NoErase bool `json:"no_erase,omitempty"` - } - - msgDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` - } - - // ConsumerInfoLister is used to iterate over a channel of consumer infos. - // Err method can be used to check for errors encountered during iteration. - // Info channel is always closed and therefore can be used in a range loop. - ConsumerInfoLister interface { - Info() <-chan *ConsumerInfo - Err() error - } - - // ConsumerNameLister is used to iterate over a channel of consumer names. - // Err method can be used to check for errors encountered during iteration. - // Name channel is always closed and therefore can be used in a range loop. 
- ConsumerNameLister interface { - Name() <-chan string - Err() error - } - - consumerLister struct { - js *jetStream - offset int - pageInfo *apiPaged - - consumers chan *ConsumerInfo - names chan string - err error - } - - consumerListResponse struct { - apiResponse - apiPaged - Consumers []*ConsumerInfo `json:"consumers"` - } - - consumerNamesResponse struct { - apiResponse - apiPaged - Consumers []string `json:"consumers"` - } -) - -// CreateOrUpdateConsumer creates a consumer on a given stream with -// given config. If consumer already exists, it will be updated (if -// possible). Consumer interface is returned, allowing to operate on a -// consumer (e.g. fetch messages). -func (s *stream) CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) { - return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionCreateOrUpdate) -} - -// CreateConsumer creates a consumer on a given stream with given -// config. If consumer already exists and the provided configuration -// differs from its configuration, ErrConsumerExists is returned. If the -// provided configuration is the same as the existing consumer, the -// existing consumer is returned. Consumer interface is returned, -// allowing to operate on a consumer (e.g. fetch messages). -func (s *stream) CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) { - return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionCreate) -} - -// UpdateConsumer updates an existing consumer. If consumer does not -// exist, ErrConsumerDoesNotExist is returned. Consumer interface is -// returned, allowing to operate on a consumer (e.g. fetch messages). -func (s *stream) UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) { - return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionUpdate) -} - -// OrderedConsumer returns an OrderedConsumer instance. 
OrderedConsumer -// are managed by the library and provide a simple way to consume -// messages from a stream. Ordered consumers are ephemeral in-memory -// pull consumers and are resilient to deletes and restarts. -func (s *stream) OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) { - oc := &orderedConsumer{ - jetStream: s.jetStream, - cfg: &cfg, - stream: s.name, - namePrefix: nuid.Next(), - doReset: make(chan struct{}, 1), - } - if cfg.OptStartSeq != 0 { - oc.cursor.streamSeq = cfg.OptStartSeq - 1 - } - err := oc.reset() - if err != nil { - return nil, err - } - - return oc, nil -} - -// Consumer returns an interface to an existing consumer, allowing processing -// of messages. If consumer does not exist, ErrConsumerNotFound is -// returned. -func (s *stream) Consumer(ctx context.Context, name string) (Consumer, error) { - return getConsumer(ctx, s.jetStream, s.name, name) -} - -// DeleteConsumer removes a consumer with given name from a stream. -// If consumer does not exist, ErrConsumerNotFound is returned. -func (s *stream) DeleteConsumer(ctx context.Context, name string) error { - return deleteConsumer(ctx, s.jetStream, s.name, name) -} - -// Info returns StreamInfo from the server. 
-func (s *stream) Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - var infoReq *streamInfoRequest - for _, opt := range opts { - if infoReq == nil { - infoReq = &streamInfoRequest{} - } - if err := opt(infoReq); err != nil { - return nil, err - } - } - var req []byte - var err error - var subjectMap map[string]uint64 - var offset int - - infoSubject := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiStreamInfoT, s.name)) - var info *StreamInfo - for { - if infoReq != nil { - if infoReq.SubjectFilter != "" { - if subjectMap == nil { - subjectMap = make(map[string]uint64) - } - infoReq.Offset = offset - } - req, err = json.Marshal(infoReq) - if err != nil { - return nil, err - } - } - var resp streamInfoResponse - if _, err = s.jetStream.apiRequestJSON(ctx, infoSubject, &resp, req); err != nil { - return nil, err - } - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeStreamNotFound { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - info = resp.StreamInfo - var total int - if resp.Total != 0 { - total = resp.Total - } - if len(resp.StreamInfo.State.Subjects) > 0 { - for subj, msgs := range resp.StreamInfo.State.Subjects { - subjectMap[subj] = msgs - } - offset = len(subjectMap) - } - if total == 0 || total <= offset { - info.State.Subjects = nil - // we don't want to store subjects in cache - cached := *info - s.info = &cached - info.State.Subjects = subjectMap - break - } - } - - return info, nil -} - -// CachedInfo returns ConsumerInfo currently cached on this stream. -// This method does not perform any network requests. The cached -// StreamInfo is updated on every call to Info and Update. -func (s *stream) CachedInfo() *StreamInfo { - return s.info -} - -// Purge removes messages from a stream. It is a destructive operation. -// Use with caution. See StreamPurgeOpt for available options. 
-func (s *stream) Purge(ctx context.Context, opts ...StreamPurgeOpt) error { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - var purgeReq StreamPurgeRequest - for _, opt := range opts { - if err := opt(&purgeReq); err != nil { - return err - } - } - var req []byte - var err error - req, err = json.Marshal(purgeReq) - if err != nil { - return err - } - - purgeSubject := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiStreamPurgeT, s.name)) - - var resp streamPurgeResponse - if _, err = s.jetStream.apiRequestJSON(ctx, purgeSubject, &resp, req); err != nil { - return err - } - if resp.Error != nil { - return resp.Error - } - - return nil -} - -// GetMsg retrieves a raw stream message stored in JetStream by sequence number. -func (s *stream) GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) { - req := &apiMsgGetRequest{Seq: seq} - for _, opt := range opts { - if err := opt(req); err != nil { - return nil, err - } - } - return s.getMsg(ctx, req) -} - -// GetLastMsgForSubject retrieves the last raw stream message stored in -// JetStream on a given subject subject. 
-func (s *stream) GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) { - return s.getMsg(ctx, &apiMsgGetRequest{LastFor: subject}) -} - -func (s *stream) getMsg(ctx context.Context, mreq *apiMsgGetRequest) (*RawStreamMsg, error) { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - req, err := json.Marshal(mreq) - if err != nil { - return nil, err - } - var gmSubj string - - // handle direct gets - if s.info.Config.AllowDirect { - if mreq.LastFor != "" { - gmSubj = apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiDirectMsgGetLastBySubjectT, s.name, mreq.LastFor)) - r, err := s.jetStream.apiRequest(ctx, gmSubj, nil) - if err != nil { - return nil, err - } - return convertDirectGetMsgResponseToMsg(s.name, r.msg) - } - gmSubj = apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiDirectMsgGetT, s.name)) - r, err := s.jetStream.apiRequest(ctx, gmSubj, req) - if err != nil { - return nil, err - } - return convertDirectGetMsgResponseToMsg(s.name, r.msg) - } - - var resp apiMsgGetResponse - dsSubj := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiMsgGetT, s.name)) - _, err = s.jetStream.apiRequestJSON(ctx, dsSubj, &resp, req) - if err != nil { - return nil, err - } - - if resp.Error != nil { - if resp.Error.ErrorCode == JSErrCodeMessageNotFound { - return nil, ErrMsgNotFound - } - return nil, resp.Error - } - - msg := resp.Message - - var hdr nats.Header - if len(msg.Header) > 0 { - hdr, err = nats.DecodeHeadersMsg(msg.Header) - if err != nil { - return nil, err - } - } - - return &RawStreamMsg{ - Subject: msg.Subject, - Sequence: msg.Sequence, - Header: hdr, - Data: msg.Data, - Time: msg.Time, - }, nil -} - -func convertDirectGetMsgResponseToMsg(name string, r *nats.Msg) (*RawStreamMsg, error) { - // Check for 404/408. 
We would get a no-payload message and a "Status" header - if len(r.Data) == 0 { - val := r.Header.Get(statusHdr) - if val != "" { - switch val { - case noMessages: - return nil, ErrMsgNotFound - default: - desc := r.Header.Get("Description") - if desc == "" { - desc = "unable to get message" - } - return nil, fmt.Errorf("nats: %s", desc) - } - } - } - // Check for headers that give us the required information to - // reconstruct the message. - if len(r.Header) == 0 { - return nil, fmt.Errorf("nats: response should have headers") - } - stream := r.Header.Get(StreamHeader) - if stream == "" { - return nil, fmt.Errorf("nats: missing stream header") - } - - seqStr := r.Header.Get(SequenceHeader) - if seqStr == "" { - return nil, fmt.Errorf("nats: missing sequence header") - } - seq, err := strconv.ParseUint(seqStr, 10, 64) - if err != nil { - return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err) - } - timeStr := r.Header.Get(TimeStampHeaer) - if timeStr == "" { - return nil, fmt.Errorf("nats: missing timestamp header") - } - - tm, err := time.Parse(time.RFC3339Nano, timeStr) - if err != nil { - return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err) - } - subj := r.Header.Get(SubjectHeader) - if subj == "" { - return nil, fmt.Errorf("nats: missing subject header") - } - return &RawStreamMsg{ - Subject: subj, - Sequence: seq, - Header: r.Header, - Data: r.Data, - Time: tm, - }, nil -} - -// DeleteMsg deletes a message from a stream. -// On the server, the message is marked as erased, but not overwritten. -func (s *stream) DeleteMsg(ctx context.Context, seq uint64) error { - return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq, NoErase: true}) -} - -// SecureDeleteMsg deletes a message from a stream. The deleted message -// is overwritten with random data. As a result, this operation is slower -// than DeleteMsg. 
-func (s *stream) SecureDeleteMsg(ctx context.Context, seq uint64) error { - return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq}) -} - -func (s *stream) deleteMsg(ctx context.Context, req *msgDeleteRequest) error { - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - r, err := json.Marshal(req) - if err != nil { - return err - } - subj := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiMsgDeleteT, s.name)) - var resp msgDeleteResponse - if _, err = s.jetStream.apiRequestJSON(ctx, subj, &resp, r); err != nil { - return err - } - if !resp.Success { - return fmt.Errorf("%w: %s", ErrMsgDeleteUnsuccessful, err) - } - return nil -} - -// ListConsumers returns ConsumerInfoLister enabling iterating over a -// channel of consumer infos. -func (s *stream) ListConsumers(ctx context.Context) ConsumerInfoLister { - l := &consumerLister{ - js: s.jetStream, - consumers: make(chan *ConsumerInfo), - } - go func() { - defer close(l.consumers) - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - for { - page, err := l.consumerInfos(ctx, s.name) - if err != nil && !errors.Is(err, ErrEndOfData) { - l.err = err - return - } - for _, info := range page { - select { - case <-ctx.Done(): - l.err = ctx.Err() - return - default: - } - if info != nil { - l.consumers <- info - } - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - - return l -} - -func (s *consumerLister) Info() <-chan *ConsumerInfo { - return s.consumers -} - -func (s *consumerLister) Err() error { - return s.err -} - -// ConsumerNames returns a ConsumerNameLister enabling iterating over a -// channel of consumer names. 
-func (s *stream) ConsumerNames(ctx context.Context) ConsumerNameLister { - l := &consumerLister{ - js: s.jetStream, - names: make(chan string), - } - go func() { - defer close(l.names) - ctx, cancel := wrapContextWithoutDeadline(ctx) - if cancel != nil { - defer cancel() - } - for { - page, err := l.consumerNames(ctx, s.name) - if err != nil && !errors.Is(err, ErrEndOfData) { - l.err = err - return - } - for _, info := range page { - select { - case l.names <- info: - case <-ctx.Done(): - l.err = ctx.Err() - return - } - } - if errors.Is(err, ErrEndOfData) { - return - } - } - }() - - return l -} - -func (s *consumerLister) Name() <-chan string { - return s.names -} - -// consumerInfos fetches the next ConsumerInfo page -func (s *consumerLister) consumerInfos(ctx context.Context, stream string) ([]*ConsumerInfo, error) { - if s.pageInfo != nil && s.offset >= s.pageInfo.Total { - return nil, ErrEndOfData - } - - req, err := json.Marshal( - apiPagedRequest{Offset: s.offset}, - ) - if err != nil { - return nil, err - } - - slSubj := apiSubj(s.js.apiPrefix, fmt.Sprintf(apiConsumerListT, stream)) - var resp consumerListResponse - _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req) - if err != nil { - return nil, err - } - if resp.Error != nil { - return nil, resp.Error - } - - s.pageInfo = &resp.apiPaged - s.offset += len(resp.Consumers) - return resp.Consumers, nil -} - -// consumerNames fetches the next consumer names page -func (s *consumerLister) consumerNames(ctx context.Context, stream string) ([]string, error) { - if s.pageInfo != nil && s.offset >= s.pageInfo.Total { - return nil, ErrEndOfData - } - - req, err := json.Marshal( - apiPagedRequest{Offset: s.offset}, - ) - if err != nil { - return nil, err - } - - slSubj := apiSubj(s.js.apiPrefix, fmt.Sprintf(apiConsumerNamesT, stream)) - var resp consumerNamesResponse - _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req) - if err != nil { - return nil, err - } - if resp.Error != nil { - return nil, resp.Error 
- } - - s.pageInfo = &resp.apiPaged - s.offset += len(resp.Consumers) - return resp.Consumers, nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go deleted file mode 100644 index dd1f9d9..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright 2022-2024 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jetstream - -import ( - "encoding/json" - "fmt" - "strings" - "time" - - "golang.org/x/text/cases" - "golang.org/x/text/language" -) - -type ( - // StreamInfo shows config and current state for this stream. - StreamInfo struct { - // Config contains the configuration settings of the stream, set when - // creating or updating the stream. - Config StreamConfig `json:"config"` - - // Created is the timestamp when the stream was created. - Created time.Time `json:"created"` - - // State provides the state of the stream at the time of request, - // including metrics like the number of messages in the stream, total - // bytes, etc. - State StreamState `json:"state"` - - // Cluster contains information about the cluster to which this stream - // belongs (if applicable). - Cluster *ClusterInfo `json:"cluster,omitempty"` - - // Mirror contains information about another stream this one is - // mirroring. 
Mirroring is used to create replicas of another stream's - // data. This field is omitted if the stream is not mirroring another - // stream. - Mirror *StreamSourceInfo `json:"mirror,omitempty"` - - // Sources is a list of source streams from which this stream collects - // data. - Sources []*StreamSourceInfo `json:"sources,omitempty"` - - // TimeStamp indicates when the info was gathered by the server. - TimeStamp time.Time `json:"ts"` - } - - // StreamConfig is the configuration of a JetStream stream. - StreamConfig struct { - // Name is the name of the stream. It is required and must be unique - // across the JetStream account. - // - // Name Names cannot contain whitespace, ., *, >, path separators - // (forward or backwards slash), and non-printable characters. - Name string `json:"name"` - - // Description is an optional description of the stream. - Description string `json:"description,omitempty"` - - // Subjects is a list of subjects that the stream is listening on. - // Wildcards are supported. Subjects cannot be set if the stream is - // created as a mirror. - Subjects []string `json:"subjects,omitempty"` - - // Retention defines the message retention policy for the stream. - // Defaults to LimitsPolicy. - Retention RetentionPolicy `json:"retention"` - - // MaxConsumers specifies the maximum number of consumers allowed for - // the stream. - MaxConsumers int `json:"max_consumers"` - - // MaxMsgs is the maximum number of messages the stream will store. - // After reaching the limit, stream adheres to the discard policy. - // If not set, server default is -1 (unlimited). - MaxMsgs int64 `json:"max_msgs"` - - // MaxBytes is the maximum total size of messages the stream will store. - // After reaching the limit, stream adheres to the discard policy. - // If not set, server default is -1 (unlimited). 
- MaxBytes int64 `json:"max_bytes"` - - // Discard defines the policy for handling messages when the stream - // reaches its limits in terms of number of messages or total bytes. - Discard DiscardPolicy `json:"discard"` - - // DiscardNewPerSubject is a flag to enable discarding new messages per - // subject when limits are reached. Requires DiscardPolicy to be - // DiscardNew and the MaxMsgsPerSubject to be set. - DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"` - - // MaxAge is the maximum age of messages that the stream will retain. - MaxAge time.Duration `json:"max_age"` - - // MaxMsgsPerSubject is the maximum number of messages per subject that - // the stream will retain. - MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"` - - // MaxMsgSize is the maximum size of any single message in the stream. - MaxMsgSize int32 `json:"max_msg_size,omitempty"` - - // Storage specifies the type of storage backend used for the stream - // (file or memory). - Storage StorageType `json:"storage"` - - // Replicas is the number of stream replicas in clustered JetStream. - // Defaults to 1, maximum is 5. - Replicas int `json:"num_replicas"` - - // NoAck is a flag to disable acknowledging messages received by this - // stream. - // - // If set to true, publish methods from the JetStream client will not - // work as expected, since they rely on acknowledgements. Core NATS - // publish methods should be used instead. Note that this will make - // message delivery less reliable. - NoAck bool `json:"no_ack,omitempty"` - - // Duplicates is the window within which to track duplicate messages. - // If not set, server default is 2 minutes. - Duplicates time.Duration `json:"duplicate_window,omitempty"` - - // Placement is used to declare where the stream should be placed via - // tags and/or an explicit cluster name. - Placement *Placement `json:"placement,omitempty"` - - // Mirror defines the configuration for mirroring another stream. 
- Mirror *StreamSource `json:"mirror,omitempty"` - - // Sources is a list of other streams this stream sources messages from. - Sources []*StreamSource `json:"sources,omitempty"` - - // Sealed streams do not allow messages to be published or deleted via limits or API, - // sealed streams can not be unsealed via configuration update. Can only - // be set on already created streams via the Update API. - Sealed bool `json:"sealed,omitempty"` - - // DenyDelete restricts the ability to delete messages from a stream via - // the API. Defaults to false. - DenyDelete bool `json:"deny_delete,omitempty"` - - // DenyPurge restricts the ability to purge messages from a stream via - // the API. Defaults to false. - DenyPurge bool `json:"deny_purge,omitempty"` - - // AllowRollup allows the use of the Nats-Rollup header to replace all - // contents of a stream, or subject in a stream, with a single new - // message. - AllowRollup bool `json:"allow_rollup_hdrs,omitempty"` - - // Compression specifies the message storage compression algorithm. - // Defaults to NoCompression. - Compression StoreCompression `json:"compression"` - - // FirstSeq is the initial sequence number of the first message in the - // stream. - FirstSeq uint64 `json:"first_seq,omitempty"` - - // SubjectTransform allows applying a transformation to matching - // messages' subjects. - SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"` - - // RePublish allows immediate republishing a message to the configured - // subject after it's stored. - RePublish *RePublish `json:"republish,omitempty"` - - // AllowDirect enables direct access to individual messages using direct - // get API. Defaults to false. - AllowDirect bool `json:"allow_direct"` - - // MirrorDirect enables direct access to individual messages from the - // origin stream using direct get API. Defaults to false. 
- MirrorDirect bool `json:"mirror_direct"` - - // ConsumerLimits defines limits of certain values that consumers can - // set, defaults for those who don't set these settings - ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"` - - // Metadata is a set of application-defined key-value pairs for - // associating metadata on the stream. This feature requires nats-server - // v2.10.0 or later. - Metadata map[string]string `json:"metadata,omitempty"` - - // Template identifies the template that manages the Stream. DEPRECATED: - // This feature is no longer supported. - Template string `json:"template_owner,omitempty"` - } - - // StreamSourceInfo shows information about an upstream stream - // source/mirror. - StreamSourceInfo struct { - // Name is the name of the stream that is being replicated. - Name string `json:"name"` - - // Lag informs how many messages behind the source/mirror operation is. - // This will only show correctly if there is active communication - // with stream/mirror. - Lag uint64 `json:"lag"` - - // Active informs when last the mirror or sourced stream had activity. - // Value will be -1 when there has been no activity. - Active time.Duration `json:"active"` - - // FilterSubject is the subject filter defined for this source/mirror. - FilterSubject string `json:"filter_subject,omitempty"` - - // SubjectTransforms is a list of subject transforms defined for this - // source/mirror. - SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` - } - - // StreamState is the state of a JetStream stream at the time of request. - StreamState struct { - // Msgs is the number of messages stored in the stream. - Msgs uint64 `json:"messages"` - - // Bytes is the number of bytes stored in the stream. - Bytes uint64 `json:"bytes"` - - // FirstSeq is the sequence number of the first message in the stream. - FirstSeq uint64 `json:"first_seq"` - - // FirstTime is the timestamp of the first message in the stream. 
- FirstTime time.Time `json:"first_ts"` - - // LastSeq is the sequence number of the last message in the stream. - LastSeq uint64 `json:"last_seq"` - - // LastTime is the timestamp of the last message in the stream. - LastTime time.Time `json:"last_ts"` - - // Consumers is the number of consumers on the stream. - Consumers int `json:"consumer_count"` - - // Deleted is a list of sequence numbers that have been removed from the - // stream. This field will only be returned if the stream has been - // fetched with the DeletedDetails option. - Deleted []uint64 `json:"deleted"` - - // NumDeleted is the number of messages that have been removed from the - // stream. Only deleted messages causing a gap in stream sequence numbers - // are counted. Messages deleted at the beginning or end of the stream - // are not counted. - NumDeleted int `json:"num_deleted"` - - // NumSubjects is the number of unique subjects the stream has received - // messages on. - NumSubjects uint64 `json:"num_subjects"` - - // Subjects is a map of subjects the stream has received messages on - // with message count per subject. This field will only be returned if - // the stream has been fetched with the SubjectFilter option. - Subjects map[string]uint64 `json:"subjects"` - } - - // ClusterInfo shows information about the underlying set of servers that - // make up the stream or consumer. - ClusterInfo struct { - // Name is the name of the cluster. - Name string `json:"name,omitempty"` - - // Leader is the server name of the RAFT leader. - Leader string `json:"leader,omitempty"` - - // Replicas is the list of members of the RAFT cluster - Replicas []*PeerInfo `json:"replicas,omitempty"` - } - - // PeerInfo shows information about the peers in the cluster that are - // supporting the stream or consumer. - PeerInfo struct { - // Name is the server name of the peer. - Name string `json:"name"` - - // Current indicates if the peer is up to date and synchronized with the - // leader. 
- Current bool `json:"current"` - - // Offline indicates if the peer is considered offline by the group. - Offline bool `json:"offline,omitempty"` - - // Active it the duration since this peer was last seen. - Active time.Duration `json:"active"` - - // Lag is the number of uncommitted operations this peer is behind the - // leader. - Lag uint64 `json:"lag,omitempty"` - } - - // SubjectTransformConfig is for applying a subject transform (to matching - // messages) before doing anything else when a new message is received. - SubjectTransformConfig struct { - // Source is the subject pattern to match incoming messages against. - Source string `json:"src"` - - // Destination is the subject pattern to remap the subject to. - Destination string `json:"dest"` - } - - // RePublish is for republishing messages once committed to a stream. The - // original subject is remapped from the subject pattern to the destination - // pattern. - RePublish struct { - // Source is the subject pattern to match incoming messages against. - Source string `json:"src,omitempty"` - - // Destination is the subject pattern to republish the subject to. - Destination string `json:"dest"` - - // HeadersOnly is a flag to indicate that only the headers should be - // republished. - HeadersOnly bool `json:"headers_only,omitempty"` - } - - // Placement is used to guide placement of streams in clustered JetStream. - Placement struct { - // Cluster is the name of the cluster to which the stream should be - // assigned. - Cluster string `json:"cluster"` - - // Tags are used to match streams to servers in the cluster. A stream - // will be assigned to a server with a matching tag. - Tags []string `json:"tags,omitempty"` - } - - // StreamSource dictates how streams can source from other streams. - StreamSource struct { - // Name is the name of the stream to source from. - Name string `json:"name"` - - // OptStartSeq is the sequence number to start sourcing from. 
- OptStartSeq uint64 `json:"opt_start_seq,omitempty"` - - // OptStartTime is the timestamp of messages to start sourcing from. - OptStartTime *time.Time `json:"opt_start_time,omitempty"` - - // FilterSubject is the subject filter used to only replicate messages - // with matching subjects. - FilterSubject string `json:"filter_subject,omitempty"` - - // SubjectTransforms is a list of subject transforms to apply to - // matching messages. - // - // Subject transforms on sources and mirrors are also used as subject - // filters with optional transformations. - SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` - - // External is a configuration referencing a stream source in another - // account or JetStream domain. - External *ExternalStream `json:"external,omitempty"` - - // Domain is used to configure a stream source in another JetStream - // domain. This setting will set the External field with the appropriate - // APIPrefix. - Domain string `json:"-"` - } - - // ExternalStream allows you to qualify access to a stream source in another - // account. - ExternalStream struct { - // APIPrefix is the subject prefix that imports the other account/domain - // $JS.API.CONSUMER.> subjects. - APIPrefix string `json:"api"` - - // DeliverPrefix is the delivery subject to use for the push consumer. - DeliverPrefix string `json:"deliver"` - } - - // StreamConsumerLimits are the limits for a consumer on a stream. These can - // be overridden on a per consumer basis. - StreamConsumerLimits struct { - // InactiveThreshold is a duration which instructs the server to clean - // up the consumer if it has been inactive for the specified duration. - InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` - - // MaxAckPending is a maximum number of outstanding unacknowledged - // messages for a consumer. 
- MaxAckPending int `json:"max_ack_pending,omitempty"` - } - - // DiscardPolicy determines how to proceed when limits of messages or bytes - // are reached. - DiscardPolicy int - - // RetentionPolicy determines how messages in a stream are retained. - RetentionPolicy int - - // StorageType determines how messages are stored for retention. - StorageType int - - // StoreCompression determines how messages are compressed. - StoreCompression uint8 -) - -const ( - // LimitsPolicy (default) means that messages are retained until any given - // limit is reached. This could be one of MaxMsgs, MaxBytes, or MaxAge. - LimitsPolicy RetentionPolicy = iota - - // InterestPolicy specifies that when all known observables have - // acknowledged a message it can be removed. - InterestPolicy - - // WorkQueuePolicy specifies that when the first worker or subscriber - // acknowledges the message it can be removed. - WorkQueuePolicy -) - -const ( - // DiscardOld will remove older messages to return to the limits. This is - // the default. - DiscardOld DiscardPolicy = iota - - // DiscardNew will fail to store new messages once the limits are reached. 
- DiscardNew -) - -const ( - limitsPolicyString = "limits" - interestPolicyString = "interest" - workQueuePolicyString = "workqueue" -) - -func (rp RetentionPolicy) String() string { - switch rp { - case LimitsPolicy: - return "Limits" - case InterestPolicy: - return "Interest" - case WorkQueuePolicy: - return "WorkQueue" - default: - return "Unknown Retention Policy" - } -} - -func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { - switch rp { - case LimitsPolicy: - return json.Marshal(limitsPolicyString) - case InterestPolicy: - return json.Marshal(interestPolicyString) - case WorkQueuePolicy: - return json.Marshal(workQueuePolicyString) - default: - return nil, fmt.Errorf("nats: can not marshal %v", rp) - } -} - -func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString(limitsPolicyString): - *rp = LimitsPolicy - case jsonString(interestPolicyString): - *rp = InterestPolicy - case jsonString(workQueuePolicyString): - *rp = WorkQueuePolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -func (dp DiscardPolicy) String() string { - switch dp { - case DiscardOld: - return "DiscardOld" - case DiscardNew: - return "DiscardNew" - default: - return "Unknown Discard Policy" - } -} - -func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { - switch dp { - case DiscardOld: - return json.Marshal("old") - case DiscardNew: - return json.Marshal("new") - default: - return nil, fmt.Errorf("nats: can not marshal %v", dp) - } -} - -func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { - switch strings.ToLower(string(data)) { - case jsonString("old"): - *dp = DiscardOld - case jsonString("new"): - *dp = DiscardNew - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -const ( - // FileStorage specifies on disk storage. It's the default. - FileStorage StorageType = iota - // MemoryStorage specifies in memory only. 
- MemoryStorage -) - -const ( - memoryStorageString = "memory" - fileStorageString = "file" -) - -func (st StorageType) String() string { - caser := cases.Title(language.AmericanEnglish) - switch st { - case MemoryStorage: - return caser.String(memoryStorageString) - case FileStorage: - return caser.String(fileStorageString) - default: - return "Unknown Storage Type" - } -} - -func (st StorageType) MarshalJSON() ([]byte, error) { - switch st { - case MemoryStorage: - return json.Marshal(memoryStorageString) - case FileStorage: - return json.Marshal(fileStorageString) - default: - return nil, fmt.Errorf("nats: can not marshal %v", st) - } -} - -func (st *StorageType) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString(memoryStorageString): - *st = MemoryStorage - case jsonString(fileStorageString): - *st = FileStorage - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -func jsonString(s string) string { - return "\"" + s + "\"" -} - -const ( - // NoCompression disables compression on the stream. This is the default. - NoCompression StoreCompression = iota - - // S2Compression enables S2 compression on the stream. 
- S2Compression -) - -func (alg StoreCompression) String() string { - switch alg { - case NoCompression: - return "None" - case S2Compression: - return "S2" - default: - return "Unknown StoreCompression" - } -} - -func (alg StoreCompression) MarshalJSON() ([]byte, error) { - var str string - switch alg { - case S2Compression: - str = "s2" - case NoCompression: - str = "none" - default: - return nil, fmt.Errorf("unknown compression algorithm") - } - return json.Marshal(str) -} - -func (alg *StoreCompression) UnmarshalJSON(b []byte) error { - var str string - if err := json.Unmarshal(b, &str); err != nil { - return err - } - switch str { - case "s2": - *alg = S2Compression - case "none": - *alg = NoCompression - default: - return fmt.Errorf("unknown compression algorithm") - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/js.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/js.go deleted file mode 100644 index 462fea1..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/js.go +++ /dev/null @@ -1,3848 +0,0 @@ -// Copyright 2020-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nats - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/json" - "errors" - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/nats-io/nats.go/internal/parser" - "github.com/nats-io/nuid" -) - -// JetStream allows persistent messaging through JetStream. -// -// NOTE: JetStream is part of legacy API. -// Users are encouraged to switch to the new JetStream API for enhanced capabilities and -// simplified API. Please refer to the `jetstream` package. -// See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md -type JetStream interface { - // Publish publishes a message to JetStream. - Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) - - // PublishMsg publishes a Msg to JetStream. - PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) - - // PublishAsync publishes a message to JetStream and returns a PubAckFuture. - // The data should not be changed until the PubAckFuture has been processed. - PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) - - // PublishMsgAsync publishes a Msg to JetStream and returns a PubAckFuture. - // The message should not be changed until the PubAckFuture has been processed. - PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) - - // PublishAsyncPending returns the number of async publishes outstanding for this context. - PublishAsyncPending() int - - // PublishAsyncComplete returns a channel that will be closed when all outstanding messages are ack'd. - PublishAsyncComplete() <-chan struct{} - - // Subscribe creates an async Subscription for JetStream. - // The stream and consumer names can be provided with the nats.Bind() option. - // For creating an ephemeral (where the consumer name is picked by the server), - // you can provide the stream name with nats.BindStream(). - // If no stream name is specified, the library will attempt to figure out which - // stream the subscription is for. 
See important notes below for more details. - // - // IMPORTANT NOTES: - // * If none of the options Bind() nor Durable() are specified, the library will - // send a request to the server to create an ephemeral JetStream consumer, - // which will be deleted after an Unsubscribe() or Drain(), or automatically - // by the server after a short period of time after the NATS subscription is - // gone. - // * If Durable() option is specified, the library will attempt to lookup a JetStream - // consumer with this name, and if found, will bind to it and not attempt to - // delete it. However, if not found, the library will send a request to - // create such durable JetStream consumer. Note that the library will delete - // the JetStream consumer after an Unsubscribe() or Drain() only if it - // created the durable consumer while subscribing. If the durable consumer - // already existed prior to subscribing it won't be deleted. - // * If Bind() option is provided, the library will attempt to lookup the - // consumer with the given name, and if successful, bind to it. If the lookup fails, - // then the Subscribe() call will return an error. - Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) - - // SubscribeSync creates a Subscription that can be used to process messages synchronously. - // See important note in Subscribe() - SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) - - // ChanSubscribe creates channel based Subscription. - // See important note in Subscribe() - ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) - - // ChanQueueSubscribe creates channel based Subscription with a queue group. - // See important note in QueueSubscribe() - ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) - - // QueueSubscribe creates a Subscription with a queue group. 
- // If no optional durable name nor binding options are specified, the queue name will be used as a durable name. - // See important note in Subscribe() - QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) - - // QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. - // See important note in QueueSubscribe() - QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) - - // PullSubscribe creates a Subscription that can fetch messages. - // See important note in Subscribe(). Additionally, for an ephemeral pull consumer, the "durable" value must be - // set to an empty string. - PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) -} - -// JetStreamContext allows JetStream messaging and stream management. -// -// NOTE: JetStreamContext is part of legacy API. -// Users are encouraged to switch to the new JetStream API for enhanced capabilities and -// simplified API. Please refer to the `jetstream` package. -// See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md -type JetStreamContext interface { - JetStream - JetStreamManager - KeyValueManager - ObjectStoreManager -} - -// Request API subjects for JetStream. -const ( - // defaultAPIPrefix is the default prefix for the JetStream API. - defaultAPIPrefix = "$JS.API." - - // jsDomainT is used to create JetStream API prefix by specifying only Domain - jsDomainT = "$JS.%s.API." - - // jsExtDomainT is used to create a StreamSource External APIPrefix - jsExtDomainT = "$JS.%s.API" - - // apiAccountInfo is for obtaining general information about JetStream. - apiAccountInfo = "INFO" - - // apiConsumerCreateT is used to create consumers. - // it accepts stream name and consumer name. - apiConsumerCreateT = "CONSUMER.CREATE.%s.%s" - - // apiConsumerCreateT is used to create consumers. 
- // it accepts stream name, consumer name and filter subject - apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s" - - // apiLegacyConsumerCreateT is used to create consumers. - // this is a legacy endpoint to support creating ephemerals before nats-server v2.9.0. - apiLegacyConsumerCreateT = "CONSUMER.CREATE.%s" - - // apiDurableCreateT is used to create durable consumers. - // this is a legacy endpoint to support creating durable consumers before nats-server v2.9.0. - apiDurableCreateT = "CONSUMER.DURABLE.CREATE.%s.%s" - - // apiConsumerInfoT is used to create consumers. - apiConsumerInfoT = "CONSUMER.INFO.%s.%s" - - // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. - apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s" - - // apiConsumerDeleteT is used to delete consumers. - apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s" - - // apiConsumerListT is used to return all detailed consumer information - apiConsumerListT = "CONSUMER.LIST.%s" - - // apiConsumerNamesT is used to return a list with all consumer names for the stream. - apiConsumerNamesT = "CONSUMER.NAMES.%s" - - // apiStreams can lookup a stream by subject. - apiStreams = "STREAM.NAMES" - - // apiStreamCreateT is the endpoint to create new streams. - apiStreamCreateT = "STREAM.CREATE.%s" - - // apiStreamInfoT is the endpoint to get information on a stream. - apiStreamInfoT = "STREAM.INFO.%s" - - // apiStreamUpdateT is the endpoint to update existing streams. - apiStreamUpdateT = "STREAM.UPDATE.%s" - - // apiStreamDeleteT is the endpoint to delete streams. - apiStreamDeleteT = "STREAM.DELETE.%s" - - // apiStreamPurgeT is the endpoint to purge streams. - apiStreamPurgeT = "STREAM.PURGE.%s" - - // apiStreamListT is the endpoint that will return all detailed stream information - apiStreamListT = "STREAM.LIST" - - // apiMsgGetT is the endpoint to get a message. 
- apiMsgGetT = "STREAM.MSG.GET.%s" - - // apiMsgGetT is the endpoint to perform a direct get of a message. - apiDirectMsgGetT = "DIRECT.GET.%s" - - // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject. - apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s" - - // apiMsgDeleteT is the endpoint to remove a message. - apiMsgDeleteT = "STREAM.MSG.DELETE.%s" - - // orderedHeartbeatsInterval is how fast we want HBs from the server during idle. - orderedHeartbeatsInterval = 5 * time.Second - - // Scale for threshold of missed HBs or lack of activity. - hbcThresh = 2 - - // For ChanSubscription, we can't update sub.delivered as we do for other - // type of subscriptions, since the channel is user provided. - // With flow control in play, we will check for flow control on incoming - // messages (as opposed to when they are delivered), but also from a go - // routine. Without this, the subscription would possibly stall until - // a new message or heartbeat/fc are received. - chanSubFCCheckInterval = 250 * time.Millisecond - - // Default time wait between retries on Publish iff err is NoResponders. - DefaultPubRetryWait = 250 * time.Millisecond - - // Default number of retries - DefaultPubRetryAttempts = 2 - - // defaultAsyncPubAckInflight is the number of async pub acks inflight. - defaultAsyncPubAckInflight = 4000 -) - -// Types of control messages, so far heartbeat and flow control -const ( - jsCtrlHB = 1 - jsCtrlFC = 2 -) - -// js is an internal struct from a JetStreamContext. -type js struct { - nc *Conn - opts *jsOpts - - // For async publish context. - mu sync.RWMutex - rpre string - rsub *Subscription - pafs map[string]*pubAckFuture - stc chan struct{} - dch chan struct{} - rr *rand.Rand - connStatusCh chan (Status) - replyPrefix string - replyPrefixLen int -} - -type jsOpts struct { - ctx context.Context - // For importing JetStream from other accounts. - pre string - // Amount of time to wait for API requests. 
- wait time.Duration - // For async publish error handling. - aecb MsgErrHandler - // Max async pub ack in flight - maxpa int - // the domain that produced the pre - domain string - // enables protocol tracing - ctrace ClientTrace - shouldTrace bool - // purgeOpts contains optional stream purge options - purgeOpts *StreamPurgeRequest - // streamInfoOpts contains optional stream info options - streamInfoOpts *StreamInfoRequest - // streamListSubject is used for subject filtering when listing streams / stream names - streamListSubject string - // For direct get message requests - directGet bool - // For direct get next message - directNextFor string - - // featureFlags are used to enable/disable specific JetStream features - featureFlags featureFlags -} - -const ( - defaultRequestWait = 5 * time.Second - defaultAccountCheck = 20 * time.Second -) - -// JetStream returns a JetStreamContext for messaging and stream management. -// Errors are only returned if inconsistent options are provided. -// -// NOTE: JetStreamContext is part of legacy API. -// Users are encouraged to switch to the new JetStream API for enhanced capabilities and -// simplified API. Please refer to the `jetstream` package. -// See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md -func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) { - js := &js{ - nc: nc, - opts: &jsOpts{ - pre: defaultAPIPrefix, - wait: defaultRequestWait, - maxpa: defaultAsyncPubAckInflight, - }, - } - inboxPrefix := InboxPrefix - if js.nc.Opts.InboxPrefix != _EMPTY_ { - inboxPrefix = js.nc.Opts.InboxPrefix + "." - } - js.replyPrefix = inboxPrefix - js.replyPrefixLen = len(js.replyPrefix) + aReplyTokensize + 1 - - for _, opt := range opts { - if err := opt.configureJSContext(js.opts); err != nil { - return nil, err - } - } - return js, nil -} - -// JSOpt configures a JetStreamContext. 
-type JSOpt interface { - configureJSContext(opts *jsOpts) error -} - -// jsOptFn configures an option for the JetStreamContext. -type jsOptFn func(opts *jsOpts) error - -func (opt jsOptFn) configureJSContext(opts *jsOpts) error { - return opt(opts) -} - -type featureFlags struct { - useDurableConsumerCreate bool -} - -// UseLegacyDurableConsumers makes JetStream use the legacy (pre nats-server v2.9.0) subjects for consumer creation. -// If this option is used when creating JetStremContext, $JS.API.CONSUMER.DURABLE.CREATE.. will be used -// to create a consumer with Durable provided, rather than $JS.API.CONSUMER.CREATE... -func UseLegacyDurableConsumers() JSOpt { - return jsOptFn(func(opts *jsOpts) error { - opts.featureFlags.useDurableConsumerCreate = true - return nil - }) -} - -// ClientTrace can be used to trace API interactions for the JetStream Context. -type ClientTrace struct { - RequestSent func(subj string, payload []byte) - ResponseReceived func(subj string, payload []byte, hdr Header) -} - -func (ct ClientTrace) configureJSContext(js *jsOpts) error { - js.ctrace = ct - js.shouldTrace = true - return nil -} - -// Domain changes the domain part of JetStream API prefix. -func Domain(domain string) JSOpt { - if domain == _EMPTY_ { - return APIPrefix(_EMPTY_) - } - - return jsOptFn(func(js *jsOpts) error { - js.domain = domain - js.pre = fmt.Sprintf(jsDomainT, domain) - - return nil - }) - -} - -func (s *StreamPurgeRequest) configureJSContext(js *jsOpts) error { - js.purgeOpts = s - return nil -} - -func (s *StreamInfoRequest) configureJSContext(js *jsOpts) error { - js.streamInfoOpts = s - return nil -} - -// APIPrefix changes the default prefix used for the JetStream API. -func APIPrefix(pre string) JSOpt { - return jsOptFn(func(js *jsOpts) error { - if pre == _EMPTY_ { - return nil - } - - js.pre = pre - if !strings.HasSuffix(js.pre, ".") { - js.pre = js.pre + "." 
- } - - return nil - }) -} - -// DirectGet is an option that can be used to make GetMsg() or GetLastMsg() -// retrieve message directly from a group of servers (leader and replicas) -// if the stream was created with the AllowDirect option. -func DirectGet() JSOpt { - return jsOptFn(func(js *jsOpts) error { - js.directGet = true - return nil - }) -} - -// DirectGetNext is an option that can be used to make GetMsg() retrieve message -// directly from a group of servers (leader and replicas) if the stream was -// created with the AllowDirect option. -// The server will find the next message matching the filter `subject` starting -// at the start sequence (argument in GetMsg()). The filter `subject` can be a -// wildcard. -func DirectGetNext(subject string) JSOpt { - return jsOptFn(func(js *jsOpts) error { - js.directGet = true - js.directNextFor = subject - return nil - }) -} - -// StreamListFilter is an option that can be used to configure `StreamsInfo()` and `StreamNames()` requests. -// It allows filtering the returned streams by subject associated with each stream. -// Wildcards can be used. For example, `StreamListFilter(FOO.*.A) will return -// all streams which have at least one subject matching the provided pattern (e.g. FOO.TEST.A). -func StreamListFilter(subject string) JSOpt { - return jsOptFn(func(opts *jsOpts) error { - opts.streamListSubject = subject - return nil - }) -} - -func (js *js) apiSubj(subj string) string { - if js.opts.pre == _EMPTY_ { - return subj - } - var b strings.Builder - b.WriteString(js.opts.pre) - b.WriteString(subj) - return b.String() -} - -// PubOpt configures options for publishing JetStream messages. -type PubOpt interface { - configurePublish(opts *pubOpts) error -} - -// pubOptFn is a function option used to configure JetStream Publish. 
-type pubOptFn func(opts *pubOpts) error - -func (opt pubOptFn) configurePublish(opts *pubOpts) error { - return opt(opts) -} - -type pubOpts struct { - ctx context.Context - ttl time.Duration - id string - lid string // Expected last msgId - str string // Expected stream name - seq *uint64 // Expected last sequence - lss *uint64 // Expected last sequence per subject - - // Publish retries for NoResponders err. - rwait time.Duration // Retry wait between attempts - rnum int // Retry attempts - - // stallWait is the max wait of a async pub ack. - stallWait time.Duration -} - -// pubAckResponse is the ack response from the JetStream API when publishing a message. -type pubAckResponse struct { - apiResponse - *PubAck -} - -// PubAck is an ack received after successfully publishing a message. -type PubAck struct { - Stream string `json:"stream"` - Sequence uint64 `json:"seq"` - Duplicate bool `json:"duplicate,omitempty"` - Domain string `json:"domain,omitempty"` -} - -// Headers for published messages. -const ( - MsgIdHdr = "Nats-Msg-Id" - ExpectedStreamHdr = "Nats-Expected-Stream" - ExpectedLastSeqHdr = "Nats-Expected-Last-Sequence" - ExpectedLastSubjSeqHdr = "Nats-Expected-Last-Subject-Sequence" - ExpectedLastMsgIdHdr = "Nats-Expected-Last-Msg-Id" - MsgRollup = "Nats-Rollup" -) - -// Headers for republished messages and direct gets. -const ( - JSStream = "Nats-Stream" - JSSequence = "Nats-Sequence" - JSTimeStamp = "Nats-Time-Stamp" - JSSubject = "Nats-Subject" - JSLastSequence = "Nats-Last-Sequence" -) - -// MsgSize is a header that will be part of a consumer's delivered message if HeadersOnly requested. -const MsgSize = "Nats-Msg-Size" - -// Rollups, can be subject only or all messages. -const ( - MsgRollupSubject = "sub" - MsgRollupAll = "all" -) - -// PublishMsg publishes a Msg to a stream from JetStream. 
-func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) { - var o = pubOpts{rwait: DefaultPubRetryWait, rnum: DefaultPubRetryAttempts} - if len(opts) > 0 { - if m.Header == nil { - m.Header = Header{} - } - for _, opt := range opts { - if err := opt.configurePublish(&o); err != nil { - return nil, err - } - } - } - // Check for option collisions. Right now just timeout and context. - if o.ctx != nil && o.ttl != 0 { - return nil, ErrContextAndTimeout - } - if o.ttl == 0 && o.ctx == nil { - o.ttl = js.opts.wait - } - if o.stallWait > 0 { - return nil, fmt.Errorf("nats: stall wait cannot be set to sync publish") - } - - if o.id != _EMPTY_ { - m.Header.Set(MsgIdHdr, o.id) - } - if o.lid != _EMPTY_ { - m.Header.Set(ExpectedLastMsgIdHdr, o.lid) - } - if o.str != _EMPTY_ { - m.Header.Set(ExpectedStreamHdr, o.str) - } - if o.seq != nil { - m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) - } - if o.lss != nil { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) - } - - var resp *Msg - var err error - - if o.ttl > 0 { - resp, err = js.nc.RequestMsg(m, time.Duration(o.ttl)) - } else { - resp, err = js.nc.RequestMsgWithContext(o.ctx, m) - } - - if err != nil { - for r, ttl := 0, o.ttl; errors.Is(err, ErrNoResponders) && (r < o.rnum || o.rnum < 0); r++ { - // To protect against small blips in leadership changes etc, if we get a no responders here retry. 
- if o.ctx != nil { - select { - case <-o.ctx.Done(): - case <-time.After(o.rwait): - } - } else { - time.Sleep(o.rwait) - } - if o.ttl > 0 { - ttl -= o.rwait - if ttl <= 0 { - err = ErrTimeout - break - } - resp, err = js.nc.RequestMsg(m, time.Duration(ttl)) - } else { - resp, err = js.nc.RequestMsgWithContext(o.ctx, m) - } - } - if err != nil { - if errors.Is(err, ErrNoResponders) { - err = ErrNoStreamResponse - } - return nil, err - } - } - - var pa pubAckResponse - if err := json.Unmarshal(resp.Data, &pa); err != nil { - return nil, ErrInvalidJSAck - } - if pa.Error != nil { - return nil, pa.Error - } - if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ { - return nil, ErrInvalidJSAck - } - return pa.PubAck, nil -} - -// Publish publishes a message to a stream from JetStream. -func (js *js) Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) { - return js.PublishMsg(&Msg{Subject: subj, Data: data}, opts...) -} - -// PubAckFuture is a future for a PubAck. -type PubAckFuture interface { - // Ok returns a receive only channel that can be used to get a PubAck. - Ok() <-chan *PubAck - - // Err returns a receive only channel that can be used to get the error from an async publish. - Err() <-chan error - - // Msg returns the message that was sent to the server. 
- Msg() *Msg -} - -type pubAckFuture struct { - js *js - msg *Msg - pa *PubAck - st time.Time - err error - errCh chan error - doneCh chan *PubAck -} - -func (paf *pubAckFuture) Ok() <-chan *PubAck { - paf.js.mu.Lock() - defer paf.js.mu.Unlock() - - if paf.doneCh == nil { - paf.doneCh = make(chan *PubAck, 1) - if paf.pa != nil { - paf.doneCh <- paf.pa - } - } - - return paf.doneCh -} - -func (paf *pubAckFuture) Err() <-chan error { - paf.js.mu.Lock() - defer paf.js.mu.Unlock() - - if paf.errCh == nil { - paf.errCh = make(chan error, 1) - if paf.err != nil { - paf.errCh <- paf.err - } - } - - return paf.errCh -} - -func (paf *pubAckFuture) Msg() *Msg { - paf.js.mu.RLock() - defer paf.js.mu.RUnlock() - return paf.msg -} - -// For quick token lookup etc. -const aReplyTokensize = 6 - -func (js *js) newAsyncReply() string { - js.mu.Lock() - if js.rsub == nil { - // Create our wildcard reply subject. - sha := sha256.New() - sha.Write([]byte(nuid.Next())) - b := sha.Sum(nil) - for i := 0; i < aReplyTokensize; i++ { - b[i] = rdigits[int(b[i]%base)] - } - js.rpre = fmt.Sprintf("%s%s.", js.replyPrefix, b[:aReplyTokensize]) - sub, err := js.nc.Subscribe(fmt.Sprintf("%s*", js.rpre), js.handleAsyncReply) - if err != nil { - js.mu.Unlock() - return _EMPTY_ - } - js.rsub = sub - js.rr = rand.New(rand.NewSource(time.Now().UnixNano())) - } - if js.connStatusCh == nil { - js.connStatusCh = js.nc.StatusChanged(RECONNECTING, CLOSED) - go js.resetPendingAcksOnReconnect() - } - var sb strings.Builder - sb.WriteString(js.rpre) - rn := js.rr.Int63() - var b [aReplyTokensize]byte - for i, l := 0, rn; i < len(b); i++ { - b[i] = rdigits[l%base] - l /= base - } - sb.Write(b[:]) - js.mu.Unlock() - return sb.String() -} - -func (js *js) resetPendingAcksOnReconnect() { - js.mu.Lock() - connStatusCh := js.connStatusCh - js.mu.Unlock() - for { - newStatus, ok := <-connStatusCh - if !ok || newStatus == CLOSED { - return - } - js.mu.Lock() - for _, paf := range js.pafs { - paf.err = ErrDisconnected 
- } - js.pafs = nil - if js.dch != nil { - close(js.dch) - js.dch = nil - } - js.mu.Unlock() - } -} - -func (js *js) cleanupReplySub() { - js.mu.Lock() - if js.rsub != nil { - js.rsub.Unsubscribe() - js.rsub = nil - } - if js.connStatusCh != nil { - close(js.connStatusCh) - js.connStatusCh = nil - } - js.mu.Unlock() -} - -// registerPAF will register for a PubAckFuture. -func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) { - js.mu.Lock() - if js.pafs == nil { - js.pafs = make(map[string]*pubAckFuture) - } - paf.js = js - js.pafs[id] = paf - np := len(js.pafs) - maxpa := js.opts.maxpa - js.mu.Unlock() - return np, maxpa -} - -// Lock should be held. -func (js *js) getPAF(id string) *pubAckFuture { - if js.pafs == nil { - return nil - } - return js.pafs[id] -} - -// clearPAF will remove a PubAckFuture that was registered. -func (js *js) clearPAF(id string) { - js.mu.Lock() - delete(js.pafs, id) - js.mu.Unlock() -} - -// PublishAsyncPending returns how many PubAckFutures are pending. -func (js *js) PublishAsyncPending() int { - js.mu.RLock() - defer js.mu.RUnlock() - return len(js.pafs) -} - -func (js *js) asyncStall() <-chan struct{} { - js.mu.Lock() - if js.stc == nil { - js.stc = make(chan struct{}) - } - stc := js.stc - js.mu.Unlock() - return stc -} - -// Handle an async reply from PublishAsync. -func (js *js) handleAsyncReply(m *Msg) { - if len(m.Subject) <= js.replyPrefixLen { - return - } - id := m.Subject[js.replyPrefixLen:] - - js.mu.Lock() - paf := js.getPAF(id) - if paf == nil { - js.mu.Unlock() - return - } - // Remove - delete(js.pafs, id) - - // Check on anyone stalled and waiting. - if js.stc != nil && len(js.pafs) < js.opts.maxpa { - close(js.stc) - js.stc = nil - } - // Check on anyone one waiting on done status. - if js.dch != nil && len(js.pafs) == 0 { - dch := js.dch - js.dch = nil - // Defer here so error is processed and can be checked. 
- defer close(dch) - } - - doErr := func(err error) { - paf.err = err - if paf.errCh != nil { - paf.errCh <- paf.err - } - cb := js.opts.aecb - js.mu.Unlock() - if cb != nil { - cb(paf.js, paf.msg, err) - } - } - - // Process no responders etc. - if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { - doErr(ErrNoResponders) - return - } - - var pa pubAckResponse - if err := json.Unmarshal(m.Data, &pa); err != nil { - doErr(ErrInvalidJSAck) - return - } - if pa.Error != nil { - doErr(pa.Error) - return - } - if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ { - doErr(ErrInvalidJSAck) - return - } - - // So here we have received a proper puback. - paf.pa = pa.PubAck - if paf.doneCh != nil { - paf.doneCh <- paf.pa - } - js.mu.Unlock() -} - -// MsgErrHandler is used to process asynchronous errors from -// JetStream PublishAsync. It will return the original -// message sent to the server for possible retransmitting and the error encountered. -type MsgErrHandler func(JetStream, *Msg, error) - -// PublishAsyncErrHandler sets the error handler for async publishes in JetStream. -func PublishAsyncErrHandler(cb MsgErrHandler) JSOpt { - return jsOptFn(func(js *jsOpts) error { - js.aecb = cb - return nil - }) -} - -// PublishAsyncMaxPending sets the maximum outstanding async publishes that can be inflight at one time. -func PublishAsyncMaxPending(max int) JSOpt { - return jsOptFn(func(js *jsOpts) error { - if max < 1 { - return errors.New("nats: max ack pending should be >= 1") - } - js.maxpa = max - return nil - }) -} - -// PublishAsync publishes a message to JetStream and returns a PubAckFuture -func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) { - return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...) 
-} - -const defaultStallWait = 200 * time.Millisecond - -func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { - var o pubOpts - if len(opts) > 0 { - if m.Header == nil { - m.Header = Header{} - } - for _, opt := range opts { - if err := opt.configurePublish(&o); err != nil { - return nil, err - } - } - } - - // Timeouts and contexts do not make sense for these. - if o.ttl != 0 || o.ctx != nil { - return nil, ErrContextAndTimeout - } - stallWait := defaultStallWait - if o.stallWait > 0 { - stallWait = o.stallWait - } - - // FIXME(dlc) - Make common. - if o.id != _EMPTY_ { - m.Header.Set(MsgIdHdr, o.id) - } - if o.lid != _EMPTY_ { - m.Header.Set(ExpectedLastMsgIdHdr, o.lid) - } - if o.str != _EMPTY_ { - m.Header.Set(ExpectedStreamHdr, o.str) - } - if o.seq != nil { - m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10)) - } - if o.lss != nil { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10)) - } - - // Reply - if m.Reply != _EMPTY_ { - return nil, errors.New("nats: reply subject should be empty") - } - reply := m.Reply - m.Reply = js.newAsyncReply() - defer func() { m.Reply = reply }() - - if m.Reply == _EMPTY_ { - return nil, errors.New("nats: error creating async reply handler") - } - - id := m.Reply[js.replyPrefixLen:] - paf := &pubAckFuture{msg: m, st: time.Now()} - numPending, maxPending := js.registerPAF(id, paf) - - if maxPending > 0 && numPending >= maxPending { - select { - case <-js.asyncStall(): - case <-time.After(stallWait): - js.clearPAF(id) - return nil, errors.New("nats: stalled with too many outstanding async published messages") - } - } - if err := js.nc.PublishMsg(m); err != nil { - js.clearPAF(id) - return nil, err - } - - return paf, nil -} - -// PublishAsyncComplete returns a channel that will be closed when all outstanding messages have been ack'd. 
-func (js *js) PublishAsyncComplete() <-chan struct{} { - js.mu.Lock() - defer js.mu.Unlock() - if js.dch == nil { - js.dch = make(chan struct{}) - } - dch := js.dch - if len(js.pafs) == 0 { - close(js.dch) - js.dch = nil - } - return dch -} - -// MsgId sets the message ID used for deduplication. -func MsgId(id string) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.id = id - return nil - }) -} - -// ExpectStream sets the expected stream to respond from the publish. -func ExpectStream(stream string) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.str = stream - return nil - }) -} - -// ExpectLastSequence sets the expected sequence in the response from the publish. -func ExpectLastSequence(seq uint64) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.seq = &seq - return nil - }) -} - -// ExpectLastSequencePerSubject sets the expected sequence per subject in the response from the publish. -func ExpectLastSequencePerSubject(seq uint64) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.lss = &seq - return nil - }) -} - -// ExpectLastMsgId sets the expected last msgId in the response from the publish. -func ExpectLastMsgId(id string) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.lid = id - return nil - }) -} - -// RetryWait sets the retry wait time when ErrNoResponders is encountered. -func RetryWait(dur time.Duration) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.rwait = dur - return nil - }) -} - -// RetryAttempts sets the retry number of attempts when ErrNoResponders is encountered. -func RetryAttempts(num int) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - opts.rnum = num - return nil - }) -} - -// StallWait sets the max wait when the producer becomes stall producing messages. 
-func StallWait(ttl time.Duration) PubOpt { - return pubOptFn(func(opts *pubOpts) error { - if ttl <= 0 { - return fmt.Errorf("nats: stall wait should be more than 0") - } - opts.stallWait = ttl - return nil - }) -} - -type ackOpts struct { - ttl time.Duration - ctx context.Context - nakDelay time.Duration -} - -// AckOpt are the options that can be passed when acknowledge a message. -type AckOpt interface { - configureAck(opts *ackOpts) error -} - -// MaxWait sets the maximum amount of time we will wait for a response. -type MaxWait time.Duration - -func (ttl MaxWait) configureJSContext(js *jsOpts) error { - js.wait = time.Duration(ttl) - return nil -} - -func (ttl MaxWait) configurePull(opts *pullOpts) error { - opts.ttl = time.Duration(ttl) - return nil -} - -// AckWait sets the maximum amount of time we will wait for an ack. -type AckWait time.Duration - -func (ttl AckWait) configurePublish(opts *pubOpts) error { - opts.ttl = time.Duration(ttl) - return nil -} - -func (ttl AckWait) configureSubscribe(opts *subOpts) error { - opts.cfg.AckWait = time.Duration(ttl) - return nil -} - -func (ttl AckWait) configureAck(opts *ackOpts) error { - opts.ttl = time.Duration(ttl) - return nil -} - -// ContextOpt is an option used to set a context.Context. -type ContextOpt struct { - context.Context -} - -func (ctx ContextOpt) configureJSContext(opts *jsOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configurePublish(opts *pubOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configureSubscribe(opts *subOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configurePull(opts *pullOpts) error { - opts.ctx = ctx - return nil -} - -func (ctx ContextOpt) configureAck(opts *ackOpts) error { - opts.ctx = ctx - return nil -} - -// Context returns an option that can be used to configure a context for APIs -// that are context aware such as those part of the JetStream interface. 
-func Context(ctx context.Context) ContextOpt { - return ContextOpt{ctx} -} - -type nakDelay time.Duration - -func (d nakDelay) configureAck(opts *ackOpts) error { - opts.nakDelay = time.Duration(d) - return nil -} - -// Subscribe - -// ConsumerConfig is the configuration of a JetStream consumer. -type ConsumerConfig struct { - Durable string `json:"durable_name,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - DeliverPolicy DeliverPolicy `json:"deliver_policy"` - OptStartSeq uint64 `json:"opt_start_seq,omitempty"` - OptStartTime *time.Time `json:"opt_start_time,omitempty"` - AckPolicy AckPolicy `json:"ack_policy"` - AckWait time.Duration `json:"ack_wait,omitempty"` - MaxDeliver int `json:"max_deliver,omitempty"` - BackOff []time.Duration `json:"backoff,omitempty"` - FilterSubject string `json:"filter_subject,omitempty"` - FilterSubjects []string `json:"filter_subjects,omitempty"` - ReplayPolicy ReplayPolicy `json:"replay_policy"` - RateLimit uint64 `json:"rate_limit_bps,omitempty"` // Bits per sec - SampleFrequency string `json:"sample_freq,omitempty"` - MaxWaiting int `json:"max_waiting,omitempty"` - MaxAckPending int `json:"max_ack_pending,omitempty"` - FlowControl bool `json:"flow_control,omitempty"` - Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` - HeadersOnly bool `json:"headers_only,omitempty"` - - // Pull based options. - MaxRequestBatch int `json:"max_batch,omitempty"` - MaxRequestExpires time.Duration `json:"max_expires,omitempty"` - MaxRequestMaxBytes int `json:"max_bytes,omitempty"` - - // Push based consumers. - DeliverSubject string `json:"deliver_subject,omitempty"` - DeliverGroup string `json:"deliver_group,omitempty"` - - // Inactivity threshold. - InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` - - // Generally inherited by parent stream and other markers, now can be configured directly. - Replicas int `json:"num_replicas"` - // Force memory storage. 
- MemoryStorage bool `json:"mem_storage,omitempty"` - - // Metadata is additional metadata for the Consumer. - // Keys starting with `_nats` are reserved. - // NOTE: Metadata requires nats-server v2.10.0+ - Metadata map[string]string `json:"metadata,omitempty"` -} - -// ConsumerInfo is the info from a JetStream consumer. -type ConsumerInfo struct { - Stream string `json:"stream_name"` - Name string `json:"name"` - Created time.Time `json:"created"` - Config ConsumerConfig `json:"config"` - Delivered SequenceInfo `json:"delivered"` - AckFloor SequenceInfo `json:"ack_floor"` - NumAckPending int `json:"num_ack_pending"` - NumRedelivered int `json:"num_redelivered"` - NumWaiting int `json:"num_waiting"` - NumPending uint64 `json:"num_pending"` - Cluster *ClusterInfo `json:"cluster,omitempty"` - PushBound bool `json:"push_bound,omitempty"` -} - -// SequenceInfo has both the consumer and the stream sequence and last activity. -type SequenceInfo struct { - Consumer uint64 `json:"consumer_seq"` - Stream uint64 `json:"stream_seq"` - Last *time.Time `json:"last_active,omitempty"` -} - -// SequencePair includes the consumer and stream sequence info from a JetStream consumer. -type SequencePair struct { - Consumer uint64 `json:"consumer_seq"` - Stream uint64 `json:"stream_seq"` -} - -// nextRequest is for getting next messages for pull based consumers from JetStream. -type nextRequest struct { - Expires time.Duration `json:"expires,omitempty"` - Batch int `json:"batch,omitempty"` - NoWait bool `json:"no_wait,omitempty"` - MaxBytes int `json:"max_bytes,omitempty"` - Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` -} - -// jsSub includes JetStream subscription info. -type jsSub struct { - js *js - - // For pull subscribers, this is the next message subject to send requests to. 
- nms string - - psubj string // the subject that was passed by user to the subscribe calls - consumer string - stream string - deliver string - pull bool - dc bool // Delete JS consumer - ackNone bool - - // This is ConsumerInfo's Pending+Consumer.Delivered that we get from the - // add consumer response. Note that some versions of the server gather the - // consumer info *after* the creation of the consumer, which means that - // some messages may have been already delivered. So the sum of the two - // is a more accurate representation of the number of messages pending or - // in the process of being delivered to the subscription when created. - pending uint64 - - // Ordered consumers - ordered bool - dseq uint64 - sseq uint64 - ccreq *createConsumerRequest - - // Heartbeats and Flow Control handling from push consumers. - hbc *time.Timer - hbi time.Duration - active bool - cmeta string - fcr string - fcd uint64 - fciseq uint64 - csfct *time.Timer - - // Cancellation function to cancel context on drain/unsubscribe. - cancel func() -} - -// Deletes the JS Consumer. -// No connection nor subscription lock must be held on entry. -func (sub *Subscription) deleteConsumer() error { - sub.mu.Lock() - jsi := sub.jsi - if jsi == nil { - sub.mu.Unlock() - return nil - } - if jsi.stream == _EMPTY_ || jsi.consumer == _EMPTY_ { - sub.mu.Unlock() - return nil - } - stream, consumer := jsi.stream, jsi.consumer - js := jsi.js - sub.mu.Unlock() - - return js.DeleteConsumer(stream, consumer) -} - -// SubOpt configures options for subscribing to JetStream consumers. -type SubOpt interface { - configureSubscribe(opts *subOpts) error -} - -// subOptFn is a function option used to configure a JetStream Subscribe. -type subOptFn func(opts *subOpts) error - -func (opt subOptFn) configureSubscribe(opts *subOpts) error { - return opt(opts) -} - -// Subscribe creates an async Subscription for JetStream. -// The stream and consumer names can be provided with the nats.Bind() option. 
-// For creating an ephemeral (where the consumer name is picked by the server), -// you can provide the stream name with nats.BindStream(). -// If no stream name is specified, the library will attempt to figure out which -// stream the subscription is for. See important notes below for more details. -// -// IMPORTANT NOTES: -// * If none of the options Bind() nor Durable() are specified, the library will -// send a request to the server to create an ephemeral JetStream consumer, -// which will be deleted after an Unsubscribe() or Drain(), or automatically -// by the server after a short period of time after the NATS subscription is -// gone. -// * If Durable() option is specified, the library will attempt to lookup a JetStream -// consumer with this name, and if found, will bind to it and not attempt to -// delete it. However, if not found, the library will send a request to create -// such durable JetStream consumer. The library will delete the JetStream consumer -// after an Unsubscribe() or Drain(). -// * If Bind() option is provided, the library will attempt to lookup the -// consumer with the given name, and if successful, bind to it. If the lookup fails, -// then the Subscribe() call will return an error. -func (js *js) Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { - if cb == nil { - return nil, ErrBadSubscription - } - return js.subscribe(subj, _EMPTY_, cb, nil, false, false, opts) -} - -// SubscribeSync creates a Subscription that can be used to process messages synchronously. -// See important note in Subscribe() -func (js *js) SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) { - mch := make(chan *Msg, js.nc.Opts.SubChanLen) - return js.subscribe(subj, _EMPTY_, nil, mch, true, false, opts) -} - -// QueueSubscribe creates a Subscription with a queue group. -// If no optional durable name nor binding options are specified, the queue name will be used as a durable name. 
-// See important note in Subscribe() -func (js *js) QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { - if cb == nil { - return nil, ErrBadSubscription - } - return js.subscribe(subj, queue, cb, nil, false, false, opts) -} - -// QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. -// See important note in QueueSubscribe() -func (js *js) QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) { - mch := make(chan *Msg, js.nc.Opts.SubChanLen) - return js.subscribe(subj, queue, nil, mch, true, false, opts) -} - -// ChanSubscribe creates channel based Subscription. -// Using ChanSubscribe without buffered capacity is not recommended since -// it will be prone to dropping messages with a slow consumer error. Make sure to give the channel enough -// capacity to handle bursts in traffic, for example other Subscribe APIs use a default of 512k capacity in comparison. -// See important note in Subscribe() -func (js *js) ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { - return js.subscribe(subj, _EMPTY_, nil, ch, false, false, opts) -} - -// ChanQueueSubscribe creates channel based Subscription with a queue group. -// See important note in QueueSubscribe() -func (js *js) ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { - return js.subscribe(subj, queue, nil, ch, false, false, opts) -} - -// PullSubscribe creates a Subscription that can fetch messages. 
-// See important note in Subscribe() -func (js *js) PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) { - mch := make(chan *Msg, js.nc.Opts.SubChanLen) - if durable != "" { - opts = append(opts, Durable(durable)) - } - return js.subscribe(subj, _EMPTY_, nil, mch, true, true, opts) -} - -func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode bool, subj, queue string) (string, error) { - ccfg := &info.Config - - // Make sure this new subject matches or is a subset. - if ccfg.FilterSubject != _EMPTY_ && subj != ccfg.FilterSubject { - return _EMPTY_, ErrSubjectMismatch - } - - // Prevent binding a subscription against incompatible consumer types. - if isPullMode && ccfg.DeliverSubject != _EMPTY_ { - return _EMPTY_, ErrPullSubscribeToPushConsumer - } else if !isPullMode && ccfg.DeliverSubject == _EMPTY_ { - return _EMPTY_, ErrPullSubscribeRequired - } - - // If pull mode, nothing else to check here. - if isPullMode { - return _EMPTY_, checkConfig(ccfg, userCfg) - } - - // At this point, we know the user wants push mode, and the JS consumer is - // really push mode. - - dg := info.Config.DeliverGroup - if dg == _EMPTY_ { - // Prevent an user from attempting to create a queue subscription on - // a JS consumer that was not created with a deliver group. - if queue != _EMPTY_ { - return _EMPTY_, fmt.Errorf("cannot create a queue subscription for a consumer without a deliver group") - } else if info.PushBound { - // Need to reject a non queue subscription to a non queue consumer - // if the consumer is already bound. 
- return _EMPTY_, fmt.Errorf("consumer is already bound to a subscription") - } - } else { - // If the JS consumer has a deliver group, we need to fail a non queue - // subscription attempt: - if queue == _EMPTY_ { - return _EMPTY_, fmt.Errorf("cannot create a subscription for a consumer with a deliver group %q", dg) - } else if queue != dg { - // Here the user's queue group name does not match the one associated - // with the JS consumer. - return _EMPTY_, fmt.Errorf("cannot create a queue subscription %q for a consumer with a deliver group %q", - queue, dg) - } - } - if err := checkConfig(ccfg, userCfg); err != nil { - return _EMPTY_, err - } - return ccfg.DeliverSubject, nil -} - -func checkConfig(s, u *ConsumerConfig) error { - makeErr := func(fieldName string, usrVal, srvVal any) error { - return fmt.Errorf("configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal) - } - - if u.Durable != _EMPTY_ && u.Durable != s.Durable { - return makeErr("durable", u.Durable, s.Durable) - } - if u.Description != _EMPTY_ && u.Description != s.Description { - return makeErr("description", u.Description, s.Description) - } - if u.DeliverPolicy != deliverPolicyNotSet && u.DeliverPolicy != s.DeliverPolicy { - return makeErr("deliver policy", u.DeliverPolicy, s.DeliverPolicy) - } - if u.OptStartSeq > 0 && u.OptStartSeq != s.OptStartSeq { - return makeErr("optional start sequence", u.OptStartSeq, s.OptStartSeq) - } - if u.OptStartTime != nil && !u.OptStartTime.IsZero() && !(*u.OptStartTime).Equal(*s.OptStartTime) { - return makeErr("optional start time", u.OptStartTime, s.OptStartTime) - } - if u.AckPolicy != ackPolicyNotSet && u.AckPolicy != s.AckPolicy { - return makeErr("ack policy", u.AckPolicy, s.AckPolicy) - } - if u.AckWait > 0 && u.AckWait != s.AckWait { - return makeErr("ack wait", u.AckWait, s.AckWait) - } - if u.MaxDeliver > 0 && u.MaxDeliver != s.MaxDeliver { - return makeErr("max deliver", u.MaxDeliver, s.MaxDeliver) - } - if 
u.ReplayPolicy != replayPolicyNotSet && u.ReplayPolicy != s.ReplayPolicy { - return makeErr("replay policy", u.ReplayPolicy, s.ReplayPolicy) - } - if u.RateLimit > 0 && u.RateLimit != s.RateLimit { - return makeErr("rate limit", u.RateLimit, s.RateLimit) - } - if u.SampleFrequency != _EMPTY_ && u.SampleFrequency != s.SampleFrequency { - return makeErr("sample frequency", u.SampleFrequency, s.SampleFrequency) - } - if u.MaxWaiting > 0 && u.MaxWaiting != s.MaxWaiting { - return makeErr("max waiting", u.MaxWaiting, s.MaxWaiting) - } - if u.MaxAckPending > 0 && u.MaxAckPending != s.MaxAckPending { - return makeErr("max ack pending", u.MaxAckPending, s.MaxAckPending) - } - // For flow control, we want to fail if the user explicit wanted it, but - // it is not set in the existing consumer. If it is not asked by the user, - // the library still handles it and so no reason to fail. - if u.FlowControl && !s.FlowControl { - return makeErr("flow control", u.FlowControl, s.FlowControl) - } - if u.Heartbeat > 0 && u.Heartbeat != s.Heartbeat { - return makeErr("heartbeat", u.Heartbeat, s.Heartbeat) - } - if u.Replicas > 0 && u.Replicas != s.Replicas { - return makeErr("replicas", u.Replicas, s.Replicas) - } - if u.MemoryStorage && !s.MemoryStorage { - return makeErr("memory storage", u.MemoryStorage, s.MemoryStorage) - } - return nil -} - -func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, isPullMode bool, opts []SubOpt) (*Subscription, error) { - cfg := ConsumerConfig{ - DeliverPolicy: deliverPolicyNotSet, - AckPolicy: ackPolicyNotSet, - ReplayPolicy: replayPolicyNotSet, - } - o := subOpts{cfg: &cfg} - if len(opts) > 0 { - for _, opt := range opts { - if opt == nil { - continue - } - if err := opt.configureSubscribe(&o); err != nil { - return nil, err - } - } - } - - // If no stream name is specified, the subject cannot be empty. 
- if subj == _EMPTY_ && o.stream == _EMPTY_ { - return nil, fmt.Errorf("nats: subject required") - } - - // Note that these may change based on the consumer info response we may get. - hasHeartbeats := o.cfg.Heartbeat > 0 - hasFC := o.cfg.FlowControl - - // Some checks for pull subscribers - if isPullMode { - // No deliver subject should be provided - if o.cfg.DeliverSubject != _EMPTY_ { - return nil, ErrPullSubscribeToPushConsumer - } - } - - // Some check/setting specific to queue subs - if queue != _EMPTY_ { - // Queue subscriber cannot have HB or FC (since messages will be randomly dispatched - // to members). We may in the future have a separate NATS subscription that all members - // would subscribe to and server would send on. - if o.cfg.Heartbeat > 0 || o.cfg.FlowControl { - // Not making this a public ErrXXX in case we allow in the future. - return nil, fmt.Errorf("nats: queue subscription doesn't support idle heartbeat nor flow control") - } - - // If this is a queue subscription and no consumer nor durable name was specified, - // then we will use the queue name as a durable name. - if o.consumer == _EMPTY_ && o.cfg.Durable == _EMPTY_ { - if err := checkConsumerName(queue); err != nil { - return nil, err - } - o.cfg.Durable = queue - } - } - - var ( - err error - shouldCreate bool - info *ConsumerInfo - deliver string - stream = o.stream - consumer = o.consumer - isDurable = o.cfg.Durable != _EMPTY_ - consumerBound = o.bound - ctx = o.ctx - skipCInfo = o.skipCInfo - notFoundErr bool - lookupErr bool - nc = js.nc - nms string - hbi time.Duration - ccreq *createConsumerRequest // In case we need to hold onto it for ordered consumers. - maxap int - ) - - // Do some quick checks here for ordered consumers. We do these here instead of spread out - // in the individual SubOpts. - if o.ordered { - // Make sure we are not durable. - if isDurable { - return nil, fmt.Errorf("nats: durable can not be set for an ordered consumer") - } - // Check ack policy. 
- if o.cfg.AckPolicy != ackPolicyNotSet { - return nil, fmt.Errorf("nats: ack policy can not be set for an ordered consumer") - } - // Check max deliver. - if o.cfg.MaxDeliver != 1 && o.cfg.MaxDeliver != 0 { - return nil, fmt.Errorf("nats: max deliver can not be set for an ordered consumer") - } - // No deliver subject, we pick our own. - if o.cfg.DeliverSubject != _EMPTY_ { - return nil, fmt.Errorf("nats: deliver subject can not be set for an ordered consumer") - } - // Queue groups not allowed. - if queue != _EMPTY_ { - return nil, fmt.Errorf("nats: queues not be set for an ordered consumer") - } - // Check for bound consumers. - if consumer != _EMPTY_ { - return nil, fmt.Errorf("nats: can not bind existing consumer for an ordered consumer") - } - // Check for pull mode. - if isPullMode { - return nil, fmt.Errorf("nats: can not use pull mode for an ordered consumer") - } - // Setup how we need it to be here. - o.cfg.FlowControl = true - o.cfg.AckPolicy = AckNonePolicy - o.cfg.MaxDeliver = 1 - o.cfg.AckWait = 22 * time.Hour // Just set to something known, not utilized. - // Force R1 and MemoryStorage for these. - o.cfg.Replicas = 1 - o.cfg.MemoryStorage = true - - if !hasHeartbeats { - o.cfg.Heartbeat = orderedHeartbeatsInterval - } - hasFC, hasHeartbeats = true, true - o.mack = true // To avoid auto-ack wrapping call below. - hbi = o.cfg.Heartbeat - } - - // In case a consumer has not been set explicitly, then the - // durable name will be used as the consumer name. - if consumer == _EMPTY_ { - consumer = o.cfg.Durable - } - - // Find the stream mapped to the subject if not bound to a stream already. - if stream == _EMPTY_ { - stream, err = js.StreamNameBySubject(subj) - if err != nil { - return nil, err - } - } - - // With an explicit durable name, we can lookup the consumer first - // to which it should be attaching to. - // If SkipConsumerLookup was used, do not call consumer info. 
- if consumer != _EMPTY_ && !o.skipCInfo { - info, err = js.ConsumerInfo(stream, consumer) - notFoundErr = errors.Is(err, ErrConsumerNotFound) - lookupErr = err == ErrJetStreamNotEnabled || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) - } - - switch { - case info != nil: - deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) - if err != nil { - return nil, err - } - icfg := &info.Config - hasFC, hbi = icfg.FlowControl, icfg.Heartbeat - hasHeartbeats = hbi > 0 - maxap = icfg.MaxAckPending - case (err != nil && !notFoundErr) || (notFoundErr && consumerBound): - // If the consumer is being bound and we got an error on pull subscribe then allow the error. - if !(isPullMode && lookupErr && consumerBound) { - return nil, err - } - case skipCInfo: - // When skipping consumer info, need to rely on the manually passed sub options - // to match the expected behavior from the subscription. - hasFC, hbi = o.cfg.FlowControl, o.cfg.Heartbeat - hasHeartbeats = hbi > 0 - maxap = o.cfg.MaxAckPending - deliver = o.cfg.DeliverSubject - if consumerBound { - break - } - - // When not bound to a consumer already, proceed to create. - fallthrough - default: - // Attempt to create consumer if not found nor using Bind. - shouldCreate = true - if o.cfg.DeliverSubject != _EMPTY_ { - deliver = o.cfg.DeliverSubject - } else if !isPullMode { - deliver = nc.NewInbox() - cfg.DeliverSubject = deliver - } - // Do filtering always, server will clear as needed. - cfg.FilterSubject = subj - - // Pass the queue to the consumer config - if queue != _EMPTY_ { - cfg.DeliverGroup = queue - } - - // If not set, default to deliver all - if cfg.DeliverPolicy == deliverPolicyNotSet { - cfg.DeliverPolicy = DeliverAllPolicy - } - // If not set, default to ack explicit. 
- if cfg.AckPolicy == ackPolicyNotSet { - cfg.AckPolicy = AckExplicitPolicy - } - // If not set, default to instant - if cfg.ReplayPolicy == replayPolicyNotSet { - cfg.ReplayPolicy = ReplayInstantPolicy - } - - // If we have acks at all and the MaxAckPending is not set go ahead - // and set to the internal max for channel based consumers - if cfg.MaxAckPending == 0 && ch != nil && cfg.AckPolicy != AckNonePolicy { - cfg.MaxAckPending = cap(ch) - } - // Create request here. - ccreq = &createConsumerRequest{ - Stream: stream, - Config: &cfg, - } - hbi = cfg.Heartbeat - } - - if isPullMode { - nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, consumer) - deliver = nc.NewInbox() - // for pull consumers, create a wildcard subscription to differentiate pull requests - deliver += ".*" - } - - // In case this has a context, then create a child context that - // is possible to cancel via unsubscribe / drain. - var cancel func() - if ctx != nil { - ctx, cancel = context.WithCancel(ctx) - } - - jsi := &jsSub{ - js: js, - stream: stream, - consumer: consumer, - deliver: deliver, - hbi: hbi, - ordered: o.ordered, - ccreq: ccreq, - dseq: 1, - pull: isPullMode, - nms: nms, - psubj: subj, - cancel: cancel, - ackNone: o.cfg.AckPolicy == AckNonePolicy, - } - - // Auto acknowledge unless manual ack is set or policy is set to AckNonePolicy - if cb != nil && !o.mack && o.cfg.AckPolicy != AckNonePolicy { - ocb := cb - cb = func(m *Msg) { ocb(m); m.Ack() } - } - sub, err := nc.subscribe(deliver, queue, cb, ch, isSync, jsi) - if err != nil { - return nil, err - } - - // If we fail and we had the sub we need to cleanup, but can't just do a straight Unsubscribe or Drain. - // We need to clear the jsi so we do not remove any durables etc. - cleanUpSub := func() { - if sub != nil { - sub.mu.Lock() - sub.jsi = nil - sub.mu.Unlock() - sub.Unsubscribe() - } - } - - // If we are creating or updating let's process that request. 
- consName := o.cfg.Name - if shouldCreate { - if cfg.Durable != "" { - consName = cfg.Durable - } else if consName == "" { - consName = getHash(nuid.Next()) - } - info, err := js.upsertConsumer(stream, consName, ccreq.Config) - if err != nil { - var apiErr *APIError - if ok := errors.As(err, &apiErr); !ok { - cleanUpSub() - return nil, err - } - if consumer == _EMPTY_ || - (apiErr.ErrorCode != JSErrCodeConsumerAlreadyExists && apiErr.ErrorCode != JSErrCodeConsumerNameExists) { - cleanUpSub() - if errors.Is(apiErr, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, err - } - // We will not be using this sub here if we were push based. - if !isPullMode { - cleanUpSub() - } - - info, err = js.ConsumerInfo(stream, consumer) - if err != nil { - return nil, err - } - deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) - if err != nil { - return nil, err - } - - if !isPullMode { - // We can't reuse the channel, so if one was passed, we need to create a new one. - if isSync { - ch = make(chan *Msg, cap(ch)) - } else if ch != nil { - // User provided (ChanSubscription), simply try to drain it. - for done := false; !done; { - select { - case <-ch: - default: - done = true - } - } - } - jsi.deliver = deliver - jsi.hbi = info.Config.Heartbeat - - // Recreate the subscription here. - sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, isSync, jsi) - if err != nil { - return nil, err - } - hasFC = info.Config.FlowControl - hasHeartbeats = info.Config.Heartbeat > 0 - } - } else { - // Since the library created the JS consumer, it will delete it on Unsubscribe()/Drain() - sub.mu.Lock() - sub.jsi.dc = true - sub.jsi.pending = info.NumPending + info.Delivered.Consumer - // If this is an ephemeral, we did not have a consumer name, we get it from the info - // after the AddConsumer returns. 
- if consumer == _EMPTY_ { - sub.jsi.consumer = info.Name - if isPullMode { - sub.jsi.nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, info.Name) - } - } - sub.mu.Unlock() - } - // Capture max ack pending from the info response here which covers both - // success and failure followed by consumer lookup. - maxap = info.Config.MaxAckPending - } - - // If maxap is greater than the default sub's pending limit, use that. - if maxap > DefaultSubPendingMsgsLimit { - // For bytes limit, use the min of maxp*1MB or DefaultSubPendingBytesLimit - bl := maxap * 1024 * 1024 - if bl < DefaultSubPendingBytesLimit { - bl = DefaultSubPendingBytesLimit - } - if err := sub.SetPendingLimits(maxap, bl); err != nil { - return nil, err - } - } - - // Do heartbeats last if needed. - if hasHeartbeats { - sub.scheduleHeartbeatCheck() - } - // For ChanSubscriptions, if we know that there is flow control, we will - // start a go routine that evaluates the number of delivered messages - // and process flow control. - if sub.Type() == ChanSubscription && hasFC { - sub.chanSubcheckForFlowControlResponse() - } - - // Wait for context to get canceled if there is one. - if ctx != nil { - go func() { - <-ctx.Done() - sub.Unsubscribe() - }() - } - - return sub, nil -} - -// InitialConsumerPending returns the number of messages pending to be -// delivered to the consumer when the subscription was created. -func (sub *Subscription) InitialConsumerPending() (uint64, error) { - sub.mu.Lock() - defer sub.mu.Unlock() - if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { - return 0, fmt.Errorf("%w: not a JetStream subscription", ErrTypeSubscription) - } - return sub.jsi.pending, nil -} - -// This long-lived routine is used per ChanSubscription to check -// on the number of delivered messages and check for flow control response. 
-func (sub *Subscription) chanSubcheckForFlowControlResponse() { - sub.mu.Lock() - // We don't use defer since if we need to send an RC reply, we need - // to do it outside the sub's lock. So doing explicit unlock... - if sub.closed { - sub.mu.Unlock() - return - } - var fcReply string - var nc *Conn - - jsi := sub.jsi - if jsi.csfct == nil { - jsi.csfct = time.AfterFunc(chanSubFCCheckInterval, sub.chanSubcheckForFlowControlResponse) - } else { - fcReply = sub.checkForFlowControlResponse() - nc = sub.conn - // Do the reset here under the lock, it's ok... - jsi.csfct.Reset(chanSubFCCheckInterval) - } - sub.mu.Unlock() - // This call will return an error (which we don't care here) - // if nc is nil or fcReply is empty. - nc.Publish(fcReply, nil) -} - -// ErrConsumerSequenceMismatch represents an error from a consumer -// that received a Heartbeat including sequence different to the -// one expected from the view of the client. -type ErrConsumerSequenceMismatch struct { - // StreamResumeSequence is the stream sequence from where the consumer - // should resume consuming from the stream. - StreamResumeSequence uint64 - - // ConsumerSequence is the sequence of the consumer that is behind. - ConsumerSequence uint64 - - // LastConsumerSequence is the sequence of the consumer when the heartbeat - // was received. 
- LastConsumerSequence uint64 -} - -func (ecs *ErrConsumerSequenceMismatch) Error() string { - return fmt.Sprintf("nats: sequence mismatch for consumer at sequence %d (%d sequences behind), should restart consumer from stream sequence %d", - ecs.ConsumerSequence, - ecs.LastConsumerSequence-ecs.ConsumerSequence, - ecs.StreamResumeSequence, - ) -} - -// isJSControlMessage will return true if this is an empty control status message -// and indicate what type of control message it is, say jsCtrlHB or jsCtrlFC -func isJSControlMessage(msg *Msg) (bool, int) { - if len(msg.Data) > 0 || msg.Header.Get(statusHdr) != controlMsg { - return false, 0 - } - val := msg.Header.Get(descrHdr) - if strings.HasPrefix(val, "Idle") { - return true, jsCtrlHB - } - if strings.HasPrefix(val, "Flow") { - return true, jsCtrlFC - } - return true, 0 -} - -// Keeps track of the incoming message's reply subject so that the consumer's -// state (deliver sequence, etc..) can be checked against heartbeats. -// We will also bump the incoming data message sequence that is used in FC cases. -// Runs under the subscription lock -func (sub *Subscription) trackSequences(reply string) { - // For flow control, keep track of incoming message sequence. - sub.jsi.fciseq++ - sub.jsi.cmeta = reply -} - -// Check to make sure messages are arriving in order. -// Returns true if the sub had to be replaced. Will cause upper layers to return. -// The caller has verified that sub.jsi != nil and that this is not a control message. -// Lock should be held. -func (sub *Subscription) checkOrderedMsgs(m *Msg) bool { - // Ignore msgs with no reply like HBs and flow control, they are handled elsewhere. - if m.Reply == _EMPTY_ { - return false - } - - // Normal message here. 
- tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - return false - } - sseq, dseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) - - jsi := sub.jsi - if dseq != jsi.dseq { - sub.resetOrderedConsumer(jsi.sseq + 1) - return true - } - // Update our tracking here. - jsi.dseq, jsi.sseq = dseq+1, sseq - return false -} - -// Update and replace sid. -// Lock should be held on entry but will be unlocked to prevent lock inversion. -func (sub *Subscription) applyNewSID() (osid int64) { - nc := sub.conn - sub.mu.Unlock() - - nc.subsMu.Lock() - osid = sub.sid - delete(nc.subs, osid) - // Place new one. - nc.ssid++ - nsid := nc.ssid - nc.subs[nsid] = sub - nc.subsMu.Unlock() - - sub.mu.Lock() - sub.sid = nsid - return osid -} - -// We are here if we have detected a gap with an ordered consumer. -// We will create a new consumer and rewire the low level subscription. -// Lock should be held. -func (sub *Subscription) resetOrderedConsumer(sseq uint64) { - nc := sub.conn - if sub.jsi == nil || nc == nil || sub.closed { - return - } - - var maxStr string - // If there was an AUTO_UNSUB done, we need to adjust the new value - // to send after the SUB for the new sid. - if sub.max > 0 { - if sub.jsi.fciseq < sub.max { - adjustedMax := sub.max - sub.jsi.fciseq - maxStr = strconv.Itoa(int(adjustedMax)) - } else { - // We are already at the max, so we should just unsub the - // existing sub and be done - go func(sid int64) { - nc.mu.Lock() - nc.bw.appendString(fmt.Sprintf(unsubProto, sid, _EMPTY_)) - nc.kickFlusher() - nc.mu.Unlock() - }(sub.sid) - return - } - } - - // Quick unsubscribe. Since we know this is a simple push subscriber we do in place. - osid := sub.applyNewSID() - - // Grab new inbox. - newDeliver := nc.NewInbox() - sub.Subject = newDeliver - - // Snapshot the new sid under sub lock. 
- nsid := sub.sid - - // We are still in the low level readLoop for the connection so we need - // to spin a go routine to try to create the new consumer. - go func() { - // Unsubscribe and subscribe with new inbox and sid. - // Remap a new low level sub into this sub since its client accessible. - // This is done here in this go routine to prevent lock inversion. - nc.mu.Lock() - nc.bw.appendString(fmt.Sprintf(unsubProto, osid, _EMPTY_)) - nc.bw.appendString(fmt.Sprintf(subProto, newDeliver, _EMPTY_, nsid)) - if maxStr != _EMPTY_ { - nc.bw.appendString(fmt.Sprintf(unsubProto, nsid, maxStr)) - } - nc.kickFlusher() - nc.mu.Unlock() - - pushErr := func(err error) { - nc.handleConsumerSequenceMismatch(sub, fmt.Errorf("%w: recreating ordered consumer", err)) - nc.unsubscribe(sub, 0, true) - } - - sub.mu.Lock() - jsi := sub.jsi - // Reset some items in jsi. - jsi.dseq = 1 - jsi.cmeta = _EMPTY_ - jsi.fcr, jsi.fcd = _EMPTY_, 0 - jsi.deliver = newDeliver - // Reset consumer request for starting policy. - cfg := jsi.ccreq.Config - cfg.DeliverSubject = newDeliver - cfg.DeliverPolicy = DeliverByStartSequencePolicy - cfg.OptStartSeq = sseq - // In case the consumer was created with a start time, we need to clear it - // since we are now using a start sequence. - cfg.OptStartTime = nil - - js := jsi.js - sub.mu.Unlock() - - sub.mu.Lock() - // Attempt to delete the existing consumer. - // We don't wait for the response since even if it's unsuccessful, - // inactivity threshold will kick in and delete it. 
- if jsi.consumer != _EMPTY_ { - go js.DeleteConsumer(jsi.stream, jsi.consumer) - } - jsi.consumer = "" - sub.mu.Unlock() - consName := getHash(nuid.Next()) - cinfo, err := js.upsertConsumer(jsi.stream, consName, cfg) - if err != nil { - var apiErr *APIError - if errors.Is(err, ErrJetStreamNotEnabled) || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) { - // if creating consumer failed, retry - return - } else if errors.As(err, &apiErr) && apiErr.ErrorCode == JSErrCodeInsufficientResourcesErr { - // retry for insufficient resources, as it may mean that client is connected to a running - // server in cluster while the server hosting R1 JetStream resources is restarting - return - } - pushErr(err) - return - } - - sub.mu.Lock() - jsi.consumer = cinfo.Name - sub.mu.Unlock() - }() -} - -// For jetstream subscriptions, returns the number of delivered messages. -// For ChanSubscription, this value is computed based on the known number -// of messages added to the channel minus the current size of that channel. -// Lock held on entry -func (sub *Subscription) getJSDelivered() uint64 { - if sub.typ == ChanSubscription { - return sub.jsi.fciseq - uint64(len(sub.mch)) - } - return sub.delivered -} - -// checkForFlowControlResponse will check to see if we should send a flow control response -// based on the subscription current delivered index and the target. -// Runs under subscription lock -func (sub *Subscription) checkForFlowControlResponse() string { - // Caller has verified that there is a sub.jsi and fc - jsi := sub.jsi - jsi.active = true - if sub.getJSDelivered() >= jsi.fcd { - fcr := jsi.fcr - jsi.fcr, jsi.fcd = _EMPTY_, 0 - return fcr - } - return _EMPTY_ -} - -// Record an inbound flow control message. -// Runs under subscription lock -func (sub *Subscription) scheduleFlowControlResponse(reply string) { - sub.jsi.fcr, sub.jsi.fcd = reply, sub.jsi.fciseq -} - -// Checks for activity from our consumer. 
-// If we do not think we are active send an async error. -func (sub *Subscription) activityCheck() { - sub.mu.Lock() - jsi := sub.jsi - if jsi == nil || sub.closed { - sub.mu.Unlock() - return - } - - active := jsi.active - jsi.hbc.Reset(jsi.hbi * hbcThresh) - jsi.active = false - nc := sub.conn - sub.mu.Unlock() - - if !active { - if !jsi.ordered || nc.Status() != CONNECTED { - nc.mu.Lock() - if errCB := nc.Opts.AsyncErrorCB; errCB != nil { - nc.ach.push(func() { errCB(nc, sub, ErrConsumerNotActive) }) - } - nc.mu.Unlock() - return - } - sub.mu.Lock() - sub.resetOrderedConsumer(jsi.sseq + 1) - sub.mu.Unlock() - } -} - -// scheduleHeartbeatCheck sets up the timer check to make sure we are active -// or receiving idle heartbeats.. -func (sub *Subscription) scheduleHeartbeatCheck() { - sub.mu.Lock() - defer sub.mu.Unlock() - - jsi := sub.jsi - if jsi == nil { - return - } - - if jsi.hbc == nil { - jsi.hbc = time.AfterFunc(jsi.hbi*hbcThresh, sub.activityCheck) - } else { - jsi.hbc.Reset(jsi.hbi * hbcThresh) - } -} - -// handleConsumerSequenceMismatch will send an async error that can be used to restart a push based consumer. -func (nc *Conn) handleConsumerSequenceMismatch(sub *Subscription, err error) { - nc.mu.Lock() - errCB := nc.Opts.AsyncErrorCB - if errCB != nil { - nc.ach.push(func() { errCB(nc, sub, err) }) - } - nc.mu.Unlock() -} - -// checkForSequenceMismatch will make sure we have not missed any messages since last seen. -func (nc *Conn) checkForSequenceMismatch(msg *Msg, s *Subscription, jsi *jsSub) { - // Process heartbeat received, get latest control metadata if present. - s.mu.Lock() - ctrl, ordered := jsi.cmeta, jsi.ordered - jsi.active = true - s.mu.Unlock() - - if ctrl == _EMPTY_ { - return - } - - tokens, err := parser.GetMetadataFields(ctrl) - if err != nil { - return - } - - // Consumer sequence. 
- var ldseq string - dseq := tokens[parser.AckConsumerSeqTokenPos] - hdr := msg.Header[lastConsumerSeqHdr] - if len(hdr) == 1 { - ldseq = hdr[0] - } - - // Detect consumer sequence mismatch and whether - // should restart the consumer. - if ldseq != dseq { - // Dispatch async error including details such as - // from where the consumer could be restarted. - sseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) - if ordered { - s.mu.Lock() - s.resetOrderedConsumer(jsi.sseq + 1) - s.mu.Unlock() - } else { - ecs := &ErrConsumerSequenceMismatch{ - StreamResumeSequence: uint64(sseq), - ConsumerSequence: parser.ParseNum(dseq), - LastConsumerSequence: parser.ParseNum(ldseq), - } - nc.handleConsumerSequenceMismatch(s, ecs) - } - } -} - -type streamRequest struct { - Subject string `json:"subject,omitempty"` -} - -type streamNamesResponse struct { - apiResponse - apiPaged - Streams []string `json:"streams"` -} - -type subOpts struct { - // For attaching. - stream, consumer string - // For creating or updating. - cfg *ConsumerConfig - // For binding a subscription to a consumer without creating it. - bound bool - // For manual ack - mack bool - // For an ordered consumer. - ordered bool - ctx context.Context - - // To disable calling ConsumerInfo - skipCInfo bool -} - -// SkipConsumerLookup will omit looking up consumer when [Bind], [Durable] -// or [ConsumerName] are provided. -// -// NOTE: This setting may cause an existing consumer to be overwritten. Also, -// because consumer lookup is skipped, all consumer options like AckPolicy, -// DeliverSubject etc. need to be provided even if consumer already exists. -func SkipConsumerLookup() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.skipCInfo = true - return nil - }) -} - -// OrderedConsumer will create a FIFO direct/ephemeral consumer for in order delivery of messages. 
-// There are no redeliveries and no acks, and flow control and heartbeats will be added but -// will be taken care of without additional client code. -func OrderedConsumer() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.ordered = true - return nil - }) -} - -// ManualAck disables auto ack functionality for async subscriptions. -func ManualAck() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.mack = true - return nil - }) -} - -// Description will set the description for the created consumer. -func Description(description string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.Description = description - return nil - }) -} - -// Durable defines the consumer name for JetStream durable subscribers. -// This function will return ErrInvalidConsumerName if the name contains -// any dot ".". -func Durable(consumer string) SubOpt { - return subOptFn(func(opts *subOpts) error { - if opts.cfg.Durable != _EMPTY_ { - return fmt.Errorf("nats: option Durable set more than once") - } - if opts.consumer != _EMPTY_ && opts.consumer != consumer { - return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer) - } - if err := checkConsumerName(consumer); err != nil { - return err - } - - opts.cfg.Durable = consumer - return nil - }) -} - -// DeliverAll will configure a Consumer to receive all the -// messages from a Stream. -func DeliverAll() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverAllPolicy - return nil - }) -} - -// DeliverLast configures a Consumer to receive messages -// starting with the latest one. -func DeliverLast() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverLastPolicy - return nil - }) -} - -// DeliverLastPerSubject configures a Consumer to receive messages -// starting with the latest one for each filtered subject. 
-func DeliverLastPerSubject() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverLastPerSubjectPolicy - return nil - }) -} - -// DeliverNew configures a Consumer to receive messages -// published after the subscription. -func DeliverNew() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverNewPolicy - return nil - }) -} - -// StartSequence configures a Consumer to receive -// messages from a start sequence. -func StartSequence(seq uint64) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverByStartSequencePolicy - opts.cfg.OptStartSeq = seq - return nil - }) -} - -// StartTime configures a Consumer to receive -// messages from a start time. -func StartTime(startTime time.Time) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverPolicy = DeliverByStartTimePolicy - opts.cfg.OptStartTime = &startTime - return nil - }) -} - -// AckNone requires no acks for delivered messages. -func AckNone() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.AckPolicy = AckNonePolicy - return nil - }) -} - -// AckAll when acking a sequence number, this implicitly acks all sequences -// below this one as well. -func AckAll() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.AckPolicy = AckAllPolicy - return nil - }) -} - -// AckExplicit requires ack or nack for all messages. -func AckExplicit() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.AckPolicy = AckExplicitPolicy - return nil - }) -} - -// MaxDeliver sets the number of redeliveries for a message. -func MaxDeliver(n int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxDeliver = n - return nil - }) -} - -// MaxAckPending sets the number of outstanding acks that are allowed before -// message delivery is halted. 
-func MaxAckPending(n int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxAckPending = n - return nil - }) -} - -// ReplayOriginal replays the messages at the original speed. -func ReplayOriginal() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.ReplayPolicy = ReplayOriginalPolicy - return nil - }) -} - -// ReplayInstant replays the messages as fast as possible. -func ReplayInstant() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.ReplayPolicy = ReplayInstantPolicy - return nil - }) -} - -// RateLimit is the Bits per sec rate limit applied to a push consumer. -func RateLimit(n uint64) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.RateLimit = n - return nil - }) -} - -// BackOff is an array of time durations that represent the time to delay based on delivery count. -func BackOff(backOff []time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.BackOff = backOff - return nil - }) -} - -// BindStream binds a consumer to a stream explicitly based on a name. -// When a stream name is not specified, the library uses the subscribe -// subject as a way to find the stream name. It is done by making a request -// to the server to get list of stream names that have a filter for this -// subject. If the returned list contains a single stream, then this -// stream name will be used, otherwise the `ErrNoMatchingStream` is returned. -// To avoid the stream lookup, provide the stream name with this function. -// See also `Bind()`. -func BindStream(stream string) SubOpt { - return subOptFn(func(opts *subOpts) error { - if opts.stream != _EMPTY_ && opts.stream != stream { - return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) - } - - opts.stream = stream - return nil - }) -} - -// Bind binds a subscription to an existing consumer from a stream without attempting to create. 
-// The first argument is the stream name and the second argument will be the consumer name. -func Bind(stream, consumer string) SubOpt { - return subOptFn(func(opts *subOpts) error { - if stream == _EMPTY_ { - return ErrStreamNameRequired - } - if consumer == _EMPTY_ { - return ErrConsumerNameRequired - } - - // In case of pull subscribers, the durable name is a required parameter - // so check that they are not different. - if opts.cfg.Durable != _EMPTY_ && opts.cfg.Durable != consumer { - return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.cfg.Durable, consumer) - } - if opts.stream != _EMPTY_ && opts.stream != stream { - return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) - } - opts.stream = stream - opts.consumer = consumer - opts.bound = true - return nil - }) -} - -// EnableFlowControl enables flow control for a push based consumer. -func EnableFlowControl() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.FlowControl = true - return nil - }) -} - -// IdleHeartbeat enables push based consumers to have idle heartbeats delivered. -// For pull consumers, idle heartbeat has to be set on each [Fetch] call. -func IdleHeartbeat(duration time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.Heartbeat = duration - return nil - }) -} - -// DeliverSubject specifies the JetStream consumer deliver subject. -// -// This option is used only in situations where the consumer does not exist -// and a creation request is sent to the server. If not provided, an inbox -// will be selected. -// If a consumer exists, then the NATS subscription will be created on -// the JetStream consumer's DeliverSubject, not necessarily this subject. -func DeliverSubject(subject string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.DeliverSubject = subject - return nil - }) -} - -// HeadersOnly() will instruct the consumer to only deliver headers and no payloads. 
-func HeadersOnly() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.HeadersOnly = true - return nil - }) -} - -// MaxRequestBatch sets the maximum pull consumer batch size that a Fetch() -// can request. -func MaxRequestBatch(max int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxRequestBatch = max - return nil - }) -} - -// MaxRequestExpires sets the maximum pull consumer request expiration that a -// Fetch() can request (using the Fetch's timeout value). -func MaxRequestExpires(max time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxRequestExpires = max - return nil - }) -} - -// MaxRequesMaxBytes sets the maximum pull consumer request bytes that a -// Fetch() can receive. -func MaxRequestMaxBytes(bytes int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxRequestMaxBytes = bytes - return nil - }) -} - -// InactiveThreshold indicates how long the server should keep a consumer -// after detecting a lack of activity. In NATS Server 2.8.4 and earlier, this -// option only applies to ephemeral consumers. In NATS Server 2.9.0 and later, -// this option applies to both ephemeral and durable consumers, allowing durable -// consumers to also be deleted automatically after the inactivity threshold has -// passed. -func InactiveThreshold(threshold time.Duration) SubOpt { - return subOptFn(func(opts *subOpts) error { - if threshold < 0 { - return fmt.Errorf("invalid InactiveThreshold value (%v), needs to be greater or equal to 0", threshold) - } - opts.cfg.InactiveThreshold = threshold - return nil - }) -} - -// ConsumerReplicas sets the number of replica count for a consumer. 
-func ConsumerReplicas(replicas int) SubOpt { - return subOptFn(func(opts *subOpts) error { - if replicas < 1 { - return fmt.Errorf("invalid ConsumerReplicas value (%v), needs to be greater than 0", replicas) - } - opts.cfg.Replicas = replicas - return nil - }) -} - -// ConsumerMemoryStorage sets the memory storage to true for a consumer. -func ConsumerMemoryStorage() SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MemoryStorage = true - return nil - }) -} - -// ConsumerName sets the name for a consumer. -func ConsumerName(name string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.Name = name - return nil - }) -} - -// ConsumerFilterSubjects can be used to set multiple subject filters on the consumer. -// It has to be used in conjunction with [nats.BindStream] and -// with empty 'subject' parameter. -func ConsumerFilterSubjects(subjects ...string) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.FilterSubjects = subjects - return nil - }) -} - -func (sub *Subscription) ConsumerInfo() (*ConsumerInfo, error) { - sub.mu.Lock() - // TODO(dlc) - Better way to mark especially if we attach. - if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { - sub.mu.Unlock() - return nil, ErrTypeSubscription - } - - // Consumer info lookup should fail if in direct mode. - js := sub.jsi.js - stream, consumer := sub.jsi.stream, sub.jsi.consumer - sub.mu.Unlock() - - return js.getConsumerInfo(stream, consumer) -} - -type pullOpts struct { - maxBytes int - ttl time.Duration - ctx context.Context - hb time.Duration -} - -// PullOpt are the options that can be passed when pulling a batch of messages. -type PullOpt interface { - configurePull(opts *pullOpts) error -} - -// PullMaxWaiting defines the max inflight pull requests. 
-func PullMaxWaiting(n int) SubOpt { - return subOptFn(func(opts *subOpts) error { - opts.cfg.MaxWaiting = n - return nil - }) -} - -type PullHeartbeat time.Duration - -func (h PullHeartbeat) configurePull(opts *pullOpts) error { - if h <= 0 { - return fmt.Errorf("%w: idle heartbeat has to be greater than 0", ErrInvalidArg) - } - opts.hb = time.Duration(h) - return nil -} - -// PullMaxBytes defines the max bytes allowed for a fetch request. -type PullMaxBytes int - -func (n PullMaxBytes) configurePull(opts *pullOpts) error { - opts.maxBytes = int(n) - return nil -} - -var ( - // errNoMessages is an error that a Fetch request using no_wait can receive to signal - // that there are no more messages available. - errNoMessages = errors.New("nats: no messages") - - // errRequestsPending is an error that represents a sub.Fetch requests that was using - // no_wait and expires time got discarded by the server. - errRequestsPending = errors.New("nats: requests pending") -) - -// Returns if the given message is a user message or not, and if -// `checkSts` is true, returns appropriate error based on the -// content of the status (404, etc..) -func checkMsg(msg *Msg, checkSts, isNoWait bool) (usrMsg bool, err error) { - // Assume user message - usrMsg = true - - // If payload or no header, consider this a user message - if len(msg.Data) > 0 || len(msg.Header) == 0 { - return - } - // Look for status header - val := msg.Header.Get(statusHdr) - // If not present, then this is considered a user message - if val == _EMPTY_ { - return - } - // At this point, this is not a user message since there is - // no payload and a "Status" header. - usrMsg = false - - // If we don't care about status, we are done. - if !checkSts { - return - } - - // if it's a heartbeat message, report as not user msg - if isHb, _ := isJSControlMessage(msg); isHb { - return - } - switch val { - case noResponders: - err = ErrNoResponders - case noMessagesSts: - // 404 indicates that there are no messages. 
- err = errNoMessages - case reqTimeoutSts: - // In case of a fetch request with no wait request and expires time, - // need to skip 408 errors and retry. - if isNoWait { - err = errRequestsPending - } else { - // Older servers may send a 408 when a request in the server was expired - // and interest is still found, which will be the case for our - // implementation. Regardless, ignore 408 errors until receiving at least - // one message when making requests without no_wait. - err = ErrTimeout - } - case jetStream409Sts: - if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "consumer deleted") { - err = ErrConsumerDeleted - break - } - - if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "leadership change") { - err = ErrConsumerLeadershipChanged - break - } - fallthrough - default: - err = fmt.Errorf("nats: %s", msg.Header.Get(descrHdr)) - } - return -} - -// Fetch pulls a batch of messages from a stream for a pull consumer. -func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) { - if sub == nil { - return nil, ErrBadSubscription - } - if batch < 1 { - return nil, ErrInvalidArg - } - - var o pullOpts - for _, opt := range opts { - if err := opt.configurePull(&o); err != nil { - return nil, err - } - } - if o.ctx != nil && o.ttl != 0 { - return nil, ErrContextAndTimeout - } - - sub.mu.Lock() - jsi := sub.jsi - // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, - // so check for jsi.pull boolean instead. - if jsi == nil || !jsi.pull { - sub.mu.Unlock() - return nil, ErrTypeSubscription - } - - nc := sub.conn - nms := sub.jsi.nms - rply, _ := newFetchInbox(jsi.deliver) - js := sub.jsi.js - pmc := len(sub.mch) > 0 - - // All fetch requests have an expiration, in case of no explicit expiration - // then the default timeout of the JetStream context is used. 
- ttl := o.ttl - if ttl == 0 { - ttl = js.opts.wait - } - sub.mu.Unlock() - - // Use the given context or setup a default one for the span - // of the pull batch request. - var ( - ctx = o.ctx - err error - cancel context.CancelFunc - ) - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), ttl) - } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { - // Prevent from passing the background context which will just block - // and cannot be canceled either. - if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { - return nil, ErrNoDeadlineContext - } - - // If the context did not have a deadline, then create a new child context - // that will use the default timeout from the JS context. - ctx, cancel = context.WithTimeout(ctx, ttl) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - - // if heartbeat is set, validate it against the context timeout - if o.hb > 0 { - deadline, _ := ctx.Deadline() - if 2*o.hb >= time.Until(deadline) { - return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg) - } - } - - // Check if context not done already before making the request. - select { - case <-ctx.Done(): - if o.ctx != nil { // Timeout or Cancel triggered by context object option - err = ctx.Err() - } else { // Timeout triggered by timeout option - err = ErrTimeout - } - default: - } - if err != nil { - return nil, err - } - - var ( - msgs = make([]*Msg, 0, batch) - msg *Msg - ) - for pmc && len(msgs) < batch { - // Check next msg with booleans that say that this is an internal call - // for a pull subscribe (so don't reject it) and don't wait if there - // are no messages. 
- msg, err = sub.nextMsgWithContext(ctx, true, false) - if err != nil { - if errors.Is(err, errNoMessages) { - err = nil - } - break - } - // Check msg but just to determine if this is a user message - // or status message, however, we don't care about values of status - // messages at this point in the Fetch() call, so checkMsg can't - // return an error. - if usrMsg, _ := checkMsg(msg, false, false); usrMsg { - msgs = append(msgs, msg) - } - } - var hbTimer *time.Timer - var hbErr error - if err == nil && len(msgs) < batch { - // For batch real size of 1, it does not make sense to set no_wait in - // the request. - noWait := batch-len(msgs) > 1 - - var nr nextRequest - - sendReq := func() error { - // The current deadline for the context will be used - // to set the expires TTL for a fetch request. - deadline, _ := ctx.Deadline() - ttl = time.Until(deadline) - - // Check if context has already been canceled or expired. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Make our request expiration a bit shorter than the current timeout. 
- expires := ttl - if ttl >= 20*time.Millisecond { - expires = ttl - 10*time.Millisecond - } - - nr.Batch = batch - len(msgs) - nr.Expires = expires - nr.NoWait = noWait - nr.MaxBytes = o.maxBytes - if 2*o.hb < expires { - nr.Heartbeat = o.hb - } else { - nr.Heartbeat = 0 - } - req, _ := json.Marshal(nr) - if err := nc.PublishRequest(nms, rply, req); err != nil { - return err - } - if o.hb > 0 { - if hbTimer == nil { - hbTimer = time.AfterFunc(2*o.hb, func() { - hbErr = ErrNoHeartbeat - cancel() - }) - } else { - hbTimer.Reset(2 * o.hb) - } - } - return nil - } - - err = sendReq() - for err == nil && len(msgs) < batch { - // Ask for next message and wait if there are no messages - msg, err = sub.nextMsgWithContext(ctx, true, true) - if err == nil { - if hbTimer != nil { - hbTimer.Reset(2 * o.hb) - } - var usrMsg bool - - usrMsg, err = checkMsg(msg, true, noWait) - if err == nil && usrMsg { - msgs = append(msgs, msg) - } else if noWait && (errors.Is(err, errNoMessages) || errors.Is(err, errRequestsPending)) && len(msgs) == 0 { - // If we have a 404/408 for our "no_wait" request and have - // not collected any message, then resend request to - // wait this time. - noWait = false - err = sendReq() - } else if errors.Is(err, ErrTimeout) && len(msgs) == 0 { - // If we get a 408, we will bail if we already collected some - // messages, otherwise ignore and go back calling nextMsg. - err = nil - } - } - } - if hbTimer != nil { - hbTimer.Stop() - } - } - // If there is at least a message added to msgs, then need to return OK and no error - if err != nil && len(msgs) == 0 { - if hbErr != nil { - return nil, hbErr - } - return nil, o.checkCtxErr(err) - } - return msgs, nil -} - -// newFetchInbox returns subject used as reply subject when sending pull requests -// as well as request ID. 
For non-wildcard subject, request ID is empty and -// passed subject is not transformed -func newFetchInbox(subj string) (string, string) { - if !strings.HasSuffix(subj, ".*") { - return subj, "" - } - reqID := nuid.Next() - var sb strings.Builder - sb.WriteString(subj[:len(subj)-1]) - sb.WriteString(reqID) - return sb.String(), reqID -} - -func subjectMatchesReqID(subject, reqID string) bool { - subjectParts := strings.Split(subject, ".") - if len(subjectParts) < 2 { - return false - } - return subjectParts[len(subjectParts)-1] == reqID -} - -// MessageBatch provides methods to retrieve messages consumed using [Subscribe.FetchBatch]. -type MessageBatch interface { - // Messages returns a channel on which messages will be published. - Messages() <-chan *Msg - - // Error returns an error encountered when fetching messages. - Error() error - - // Done signals end of execution. - Done() <-chan struct{} -} - -type messageBatch struct { - msgs chan *Msg - err error - done chan struct{} -} - -func (mb *messageBatch) Messages() <-chan *Msg { - return mb.msgs -} - -func (mb *messageBatch) Error() error { - return mb.err -} - -func (mb *messageBatch) Done() <-chan struct{} { - return mb.done -} - -// FetchBatch pulls a batch of messages from a stream for a pull consumer. -// Unlike [Subscription.Fetch], it is non blocking and returns [MessageBatch], -// allowing to retrieve incoming messages from a channel. -// The returned channel is always closed after all messages for a batch have been -// delivered by the server - it is safe to iterate over it using range. -// -// To avoid using default JetStream timeout as fetch expiry time, use [nats.MaxWait] -// or [nats.Context] (with deadline set). -// -// This method will not return error in case of pull request expiry (even if there are no messages). -// Any other error encountered when receiving messages will cause FetchBatch to stop receiving new messages. 
-func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, error) { - if sub == nil { - return nil, ErrBadSubscription - } - if batch < 1 { - return nil, ErrInvalidArg - } - - var o pullOpts - for _, opt := range opts { - if err := opt.configurePull(&o); err != nil { - return nil, err - } - } - if o.ctx != nil && o.ttl != 0 { - return nil, ErrContextAndTimeout - } - sub.mu.Lock() - jsi := sub.jsi - // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, - // so check for jsi.pull boolean instead. - if jsi == nil || !jsi.pull { - sub.mu.Unlock() - return nil, ErrTypeSubscription - } - - nc := sub.conn - nms := sub.jsi.nms - rply, reqID := newFetchInbox(sub.jsi.deliver) - js := sub.jsi.js - pmc := len(sub.mch) > 0 - - // All fetch requests have an expiration, in case of no explicit expiration - // then the default timeout of the JetStream context is used. - ttl := o.ttl - if ttl == 0 { - ttl = js.opts.wait - } - sub.mu.Unlock() - - // Use the given context or setup a default one for the span - // of the pull batch request. - var ( - ctx = o.ctx - cancel context.CancelFunc - cancelContext = true - ) - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), ttl) - } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { - // Prevent from passing the background context which will just block - // and cannot be canceled either. - if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { - return nil, ErrNoDeadlineContext - } - - // If the context did not have a deadline, then create a new child context - // that will use the default timeout from the JS context. 
- ctx, cancel = context.WithTimeout(ctx, ttl) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer func() { - // only cancel the context here if we are sure the fetching goroutine has not been started yet - if cancelContext { - cancel() - } - }() - - // if heartbeat is set, validate it against the context timeout - if o.hb > 0 { - deadline, _ := ctx.Deadline() - if 2*o.hb >= time.Until(deadline) { - return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg) - } - } - - // Check if context not done already before making the request. - select { - case <-ctx.Done(): - if o.ctx != nil { // Timeout or Cancel triggered by context object option - return nil, ctx.Err() - } else { // Timeout triggered by timeout option - return nil, ErrTimeout - } - default: - } - - result := &messageBatch{ - msgs: make(chan *Msg, batch), - done: make(chan struct{}, 1), - } - var msg *Msg - for pmc && len(result.msgs) < batch { - // Check next msg with booleans that say that this is an internal call - // for a pull subscribe (so don't reject it) and don't wait if there - // are no messages. - msg, err := sub.nextMsgWithContext(ctx, true, false) - if err != nil { - if errors.Is(err, errNoMessages) { - err = nil - } - result.err = err - break - } - // Check msg but just to determine if this is a user message - // or status message, however, we don't care about values of status - // messages at this point in the Fetch() call, so checkMsg can't - // return an error. - if usrMsg, _ := checkMsg(msg, false, false); usrMsg { - result.msgs <- msg - } - } - if len(result.msgs) == batch || result.err != nil { - close(result.msgs) - result.done <- struct{}{} - return result, nil - } - - deadline, _ := ctx.Deadline() - ttl = time.Until(deadline) - - // Make our request expiration a bit shorter than the current timeout. 
- expires := ttl - if ttl >= 20*time.Millisecond { - expires = ttl - 10*time.Millisecond - } - - requestBatch := batch - len(result.msgs) - req := nextRequest{ - Expires: expires, - Batch: requestBatch, - MaxBytes: o.maxBytes, - Heartbeat: o.hb, - } - reqJSON, err := json.Marshal(req) - if err != nil { - close(result.msgs) - result.done <- struct{}{} - result.err = err - return result, nil - } - if err := nc.PublishRequest(nms, rply, reqJSON); err != nil { - if len(result.msgs) == 0 { - return nil, err - } - close(result.msgs) - result.done <- struct{}{} - result.err = err - return result, nil - } - var hbTimer *time.Timer - var hbErr error - if o.hb > 0 { - hbTimer = time.AfterFunc(2*o.hb, func() { - hbErr = ErrNoHeartbeat - cancel() - }) - } - cancelContext = false - go func() { - defer cancel() - var requestMsgs int - for requestMsgs < requestBatch { - // Ask for next message and wait if there are no messages - msg, err = sub.nextMsgWithContext(ctx, true, true) - if err != nil { - break - } - if hbTimer != nil { - hbTimer.Reset(2 * o.hb) - } - var usrMsg bool - - usrMsg, err = checkMsg(msg, true, false) - if err != nil { - if errors.Is(err, ErrTimeout) { - if reqID != "" && !subjectMatchesReqID(msg.Subject, reqID) { - // ignore timeout message from server if it comes from a different pull request - continue - } - err = nil - } - break - } - if usrMsg { - result.msgs <- msg - requestMsgs++ - } - } - if err != nil { - if hbErr != nil { - result.err = hbErr - } else { - result.err = o.checkCtxErr(err) - } - } - close(result.msgs) - result.done <- struct{}{} - }() - return result, nil -} - -// checkCtxErr is used to determine whether ErrTimeout should be returned in case of context timeout -func (o *pullOpts) checkCtxErr(err error) error { - if o.ctx == nil && errors.Is(err, context.DeadlineExceeded) { - return ErrTimeout - } - return err -} - -func (js *js) getConsumerInfo(stream, consumer string) (*ConsumerInfo, error) { - ctx, cancel := 
context.WithTimeout(context.Background(), js.opts.wait) - defer cancel() - return js.getConsumerInfoContext(ctx, stream, consumer) -} - -func (js *js) getConsumerInfoContext(ctx context.Context, stream, consumer string) (*ConsumerInfo, error) { - ccInfoSubj := fmt.Sprintf(apiConsumerInfoT, stream, consumer) - resp, err := js.apiRequestWithContext(ctx, js.apiSubj(ccInfoSubj), nil) - if err != nil { - if errors.Is(err, ErrNoResponders) { - err = ErrJetStreamNotEnabled - } - return nil, err - } - - var info consumerResponse - if err := json.Unmarshal(resp.Data, &info); err != nil { - return nil, err - } - if info.Error != nil { - if errors.Is(info.Error, ErrConsumerNotFound) { - return nil, ErrConsumerNotFound - } - if errors.Is(info.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, info.Error - } - if info.Error == nil && info.ConsumerInfo == nil { - return nil, ErrConsumerNotFound - } - return info.ConsumerInfo, nil -} - -// a RequestWithContext with tracing via TraceCB -func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { - if js.opts.shouldTrace { - ctrace := js.opts.ctrace - if ctrace.RequestSent != nil { - ctrace.RequestSent(subj, data) - } - } - resp, err := js.nc.RequestWithContext(ctx, subj, data) - if err != nil { - return nil, err - } - if js.opts.shouldTrace { - ctrace := js.opts.ctrace - if ctrace.RequestSent != nil { - ctrace.ResponseReceived(subj, resp.Data, resp.Header) - } - } - - return resp, nil -} - -func (m *Msg) checkReply() error { - if m == nil || m.Sub == nil { - return ErrMsgNotBound - } - if m.Reply == _EMPTY_ { - return ErrMsgNoReply - } - return nil -} - -// ackReply handles all acks. Will do the right thing for pull and sync mode. -// It ensures that an ack is only sent a single time, regardless of -// how many times it is being called to avoid duplicated acks. 
-func (m *Msg) ackReply(ackType []byte, sync bool, opts ...AckOpt) error { - var o ackOpts - for _, opt := range opts { - if err := opt.configureAck(&o); err != nil { - return err - } - } - - if err := m.checkReply(); err != nil { - return err - } - - var ackNone bool - var js *js - - sub := m.Sub - sub.mu.Lock() - nc := sub.conn - if jsi := sub.jsi; jsi != nil { - js = jsi.js - ackNone = jsi.ackNone - } - sub.mu.Unlock() - - // Skip if already acked. - if atomic.LoadUint32(&m.ackd) == 1 { - return ErrMsgAlreadyAckd - } - if ackNone { - return ErrCantAckIfConsumerAckNone - } - - usesCtx := o.ctx != nil - usesWait := o.ttl > 0 - - // Only allow either AckWait or Context option to set the timeout. - if usesWait && usesCtx { - return ErrContextAndTimeout - } - - sync = sync || usesCtx || usesWait - ctx := o.ctx - wait := defaultRequestWait - if usesWait { - wait = o.ttl - } else if js != nil { - wait = js.opts.wait - } - - var body []byte - var err error - // This will be > 0 only when called from NakWithDelay() - if o.nakDelay > 0 { - body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, o.nakDelay.Nanoseconds())) - } else { - body = ackType - } - - if sync { - if usesCtx { - _, err = nc.RequestWithContext(ctx, m.Reply, body) - } else { - _, err = nc.Request(m.Reply, body, wait) - } - } else { - err = nc.Publish(m.Reply, body) - } - - // Mark that the message has been acked unless it is ackProgress - // which can be sent many times. - if err == nil && !bytes.Equal(ackType, ackProgress) { - atomic.StoreUint32(&m.ackd, 1) - } - - return err -} - -// Ack acknowledges a message. This tells the server that the message was -// successfully processed and it can move on to the next message. -func (m *Msg) Ack(opts ...AckOpt) error { - return m.ackReply(ackAck, false, opts...) -} - -// AckSync is the synchronous version of Ack. This indicates successful message -// processing. -func (m *Msg) AckSync(opts ...AckOpt) error { - return m.ackReply(ackAck, true, opts...) 
-} - -// Nak negatively acknowledges a message. This tells the server to redeliver -// the message. You can configure the number of redeliveries by passing -// nats.MaxDeliver when you Subscribe. The default is infinite redeliveries. -func (m *Msg) Nak(opts ...AckOpt) error { - return m.ackReply(ackNak, false, opts...) -} - -// Nak negatively acknowledges a message. This tells the server to redeliver -// the message after the give `delay` duration. You can configure the number -// of redeliveries by passing nats.MaxDeliver when you Subscribe. -// The default is infinite redeliveries. -func (m *Msg) NakWithDelay(delay time.Duration, opts ...AckOpt) error { - if delay > 0 { - opts = append(opts, nakDelay(delay)) - } - return m.ackReply(ackNak, false, opts...) -} - -// Term tells the server to not redeliver this message, regardless of the value -// of nats.MaxDeliver. -func (m *Msg) Term(opts ...AckOpt) error { - return m.ackReply(ackTerm, false, opts...) -} - -// InProgress tells the server that this message is being worked on. It resets -// the redelivery timer on the server. -func (m *Msg) InProgress(opts ...AckOpt) error { - return m.ackReply(ackProgress, false, opts...) -} - -// MsgMetadata is the JetStream metadata associated with received messages. -type MsgMetadata struct { - Sequence SequencePair - NumDelivered uint64 - NumPending uint64 - Timestamp time.Time - Stream string - Consumer string - Domain string -} - -// Metadata retrieves the metadata from a JetStream message. This method will -// return an error for non-JetStream Msgs. 
-func (m *Msg) Metadata() (*MsgMetadata, error) { - if err := m.checkReply(); err != nil { - return nil, err - } - - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - return nil, err - } - - meta := &MsgMetadata{ - Domain: tokens[parser.AckDomainTokenPos], - NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]), - NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]), - Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), - Stream: tokens[parser.AckStreamTokenPos], - Consumer: tokens[parser.AckConsumerTokenPos], - } - meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) - meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) - return meta, nil -} - -// AckPolicy determines how the consumer should acknowledge delivered messages. -type AckPolicy int - -const ( - // AckNonePolicy requires no acks for delivered messages. - AckNonePolicy AckPolicy = iota - - // AckAllPolicy when acking a sequence number, this implicitly acks all - // sequences below this one as well. - AckAllPolicy - - // AckExplicitPolicy requires ack or nack for all messages. 
- AckExplicitPolicy - - // For configuration mismatch check - ackPolicyNotSet = 99 -) - -func jsonString(s string) string { - return "\"" + s + "\"" -} - -func (p *AckPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("none"): - *p = AckNonePolicy - case jsonString("all"): - *p = AckAllPolicy - case jsonString("explicit"): - *p = AckExplicitPolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - - return nil -} - -func (p AckPolicy) MarshalJSON() ([]byte, error) { - switch p { - case AckNonePolicy: - return json.Marshal("none") - case AckAllPolicy: - return json.Marshal("all") - case AckExplicitPolicy: - return json.Marshal("explicit") - default: - return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p) - } -} - -func (p AckPolicy) String() string { - switch p { - case AckNonePolicy: - return "AckNone" - case AckAllPolicy: - return "AckAll" - case AckExplicitPolicy: - return "AckExplicit" - case ackPolicyNotSet: - return "Not Initialized" - default: - return "Unknown AckPolicy" - } -} - -// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream. -type ReplayPolicy int - -const ( - // ReplayInstantPolicy will replay messages as fast as possible. - ReplayInstantPolicy ReplayPolicy = iota - - // ReplayOriginalPolicy will maintain the same timing as the messages were received. 
- ReplayOriginalPolicy - - // For configuration mismatch check - replayPolicyNotSet = 99 -) - -func (p *ReplayPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("instant"): - *p = ReplayInstantPolicy - case jsonString("original"): - *p = ReplayOriginalPolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - - return nil -} - -func (p ReplayPolicy) MarshalJSON() ([]byte, error) { - switch p { - case ReplayOriginalPolicy: - return json.Marshal("original") - case ReplayInstantPolicy: - return json.Marshal("instant") - default: - return nil, fmt.Errorf("nats: unknown replay policy %v", p) - } -} - -var ( - ackAck = []byte("+ACK") - ackNak = []byte("-NAK") - ackProgress = []byte("+WPI") - ackTerm = []byte("+TERM") -) - -// DeliverPolicy determines how the consumer should select the first message to deliver. -type DeliverPolicy int - -const ( - // DeliverAllPolicy starts delivering messages from the very beginning of a - // stream. This is the default. - DeliverAllPolicy DeliverPolicy = iota - - // DeliverLastPolicy will start the consumer with the last sequence - // received. - DeliverLastPolicy - - // DeliverNewPolicy will only deliver new messages that are sent after the - // consumer is created. - DeliverNewPolicy - - // DeliverByStartSequencePolicy will deliver messages starting from a given - // sequence. - DeliverByStartSequencePolicy - - // DeliverByStartTimePolicy will deliver messages starting from a given - // time. - DeliverByStartTimePolicy - - // DeliverLastPerSubjectPolicy will start the consumer with the last message - // for all subjects received. 
- DeliverLastPerSubjectPolicy - - // For configuration mismatch check - deliverPolicyNotSet = 99 -) - -func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString("all"), jsonString("undefined"): - *p = DeliverAllPolicy - case jsonString("last"): - *p = DeliverLastPolicy - case jsonString("new"): - *p = DeliverNewPolicy - case jsonString("by_start_sequence"): - *p = DeliverByStartSequencePolicy - case jsonString("by_start_time"): - *p = DeliverByStartTimePolicy - case jsonString("last_per_subject"): - *p = DeliverLastPerSubjectPolicy - } - - return nil -} - -func (p DeliverPolicy) MarshalJSON() ([]byte, error) { - switch p { - case DeliverAllPolicy: - return json.Marshal("all") - case DeliverLastPolicy: - return json.Marshal("last") - case DeliverNewPolicy: - return json.Marshal("new") - case DeliverByStartSequencePolicy: - return json.Marshal("by_start_sequence") - case DeliverByStartTimePolicy: - return json.Marshal("by_start_time") - case DeliverLastPerSubjectPolicy: - return json.Marshal("last_per_subject") - default: - return nil, fmt.Errorf("nats: unknown deliver policy %v", p) - } -} - -// RetentionPolicy determines how messages in a set are retained. -type RetentionPolicy int - -const ( - // LimitsPolicy (default) means that messages are retained until any given limit is reached. - // This could be one of MaxMsgs, MaxBytes, or MaxAge. - LimitsPolicy RetentionPolicy = iota - // InterestPolicy specifies that when all known observables have acknowledged a message it can be removed. - InterestPolicy - // WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed. - WorkQueuePolicy -) - -// DiscardPolicy determines how to proceed when limits of messages or bytes are -// reached. -type DiscardPolicy int - -const ( - // DiscardOld will remove older messages to return to the limits. This is - // the default. 
- DiscardOld DiscardPolicy = iota - //DiscardNew will fail to store new messages. - DiscardNew -) - -const ( - limitsPolicyString = "limits" - interestPolicyString = "interest" - workQueuePolicyString = "workqueue" -) - -func (rp RetentionPolicy) String() string { - switch rp { - case LimitsPolicy: - return "Limits" - case InterestPolicy: - return "Interest" - case WorkQueuePolicy: - return "WorkQueue" - default: - return "Unknown Retention Policy" - } -} - -func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { - switch rp { - case LimitsPolicy: - return json.Marshal(limitsPolicyString) - case InterestPolicy: - return json.Marshal(interestPolicyString) - case WorkQueuePolicy: - return json.Marshal(workQueuePolicyString) - default: - return nil, fmt.Errorf("nats: can not marshal %v", rp) - } -} - -func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString(limitsPolicyString): - *rp = LimitsPolicy - case jsonString(interestPolicyString): - *rp = InterestPolicy - case jsonString(workQueuePolicyString): - *rp = WorkQueuePolicy - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -func (dp DiscardPolicy) String() string { - switch dp { - case DiscardOld: - return "DiscardOld" - case DiscardNew: - return "DiscardNew" - default: - return "Unknown Discard Policy" - } -} - -func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { - switch dp { - case DiscardOld: - return json.Marshal("old") - case DiscardNew: - return json.Marshal("new") - default: - return nil, fmt.Errorf("nats: can not marshal %v", dp) - } -} - -func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { - switch strings.ToLower(string(data)) { - case jsonString("old"): - *dp = DiscardOld - case jsonString("new"): - *dp = DiscardNew - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -// StorageType determines how messages are stored for retention. 
-type StorageType int - -const ( - // FileStorage specifies on disk storage. It's the default. - FileStorage StorageType = iota - // MemoryStorage specifies in memory only. - MemoryStorage -) - -const ( - memoryStorageString = "memory" - fileStorageString = "file" -) - -func (st StorageType) String() string { - switch st { - case MemoryStorage: - return "Memory" - case FileStorage: - return "File" - default: - return "Unknown Storage Type" - } -} - -func (st StorageType) MarshalJSON() ([]byte, error) { - switch st { - case MemoryStorage: - return json.Marshal(memoryStorageString) - case FileStorage: - return json.Marshal(fileStorageString) - default: - return nil, fmt.Errorf("nats: can not marshal %v", st) - } -} - -func (st *StorageType) UnmarshalJSON(data []byte) error { - switch string(data) { - case jsonString(memoryStorageString): - *st = MemoryStorage - case jsonString(fileStorageString): - *st = FileStorage - default: - return fmt.Errorf("nats: can not unmarshal %q", data) - } - return nil -} - -type StoreCompression uint8 - -const ( - NoCompression StoreCompression = iota - S2Compression -) - -func (alg StoreCompression) String() string { - switch alg { - case NoCompression: - return "None" - case S2Compression: - return "S2" - default: - return "Unknown StoreCompression" - } -} - -func (alg StoreCompression) MarshalJSON() ([]byte, error) { - var str string - switch alg { - case S2Compression: - str = "s2" - case NoCompression: - str = "none" - default: - return nil, fmt.Errorf("unknown compression algorithm") - } - return json.Marshal(str) -} - -func (alg *StoreCompression) UnmarshalJSON(b []byte) error { - var str string - if err := json.Unmarshal(b, &str); err != nil { - return err - } - switch str { - case "s2": - *alg = S2Compression - case "none": - *alg = NoCompression - default: - return fmt.Errorf("unknown compression algorithm") - } - return nil -} - -// Length of our hash used for named consumers. 
-const nameHashLen = 8 - -// Computes a hash for the given `name`. -func getHash(name string) string { - sha := sha256.New() - sha.Write([]byte(name)) - b := sha.Sum(nil) - for i := 0; i < nameHashLen; i++ { - b[i] = rdigits[int(b[i]%base)] - } - return string(b[:nameHashLen]) -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jserrors.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jserrors.go deleted file mode 100644 index ef5d4af..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jserrors.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2020-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "errors" - "fmt" -) - -var ( - // API errors - - // ErrJetStreamNotEnabled is an error returned when JetStream is not enabled for an account. - ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}} - - // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is not enabled for an account. - ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}} - - // ErrStreamNotFound is an error returned when stream with given name does not exist. 
- ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}} - - // ErrStreamNameAlreadyInUse is returned when a stream with given name already exists and has a different configuration. - ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}} - - // ErrStreamSubjectTransformNotSupported is returned when the connected nats-server version does not support setting - // the stream subject transform. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. - ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} - - // ErrStreamSourceSubjectTransformNotSupported is returned when the connected nats-server version does not support setting - // the stream source subject transform. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. - ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} - - // ErrStreamSourceNotSupported is returned when the connected nats-server version does not support setting - // the stream sources. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. - ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"} - - // ErrStreamSourceMultipleSubjectTransformsNotSupported is returned when the connected nats-server version does not support setting - // the stream sources. If this error is returned when executing AddStream(), the stream with invalid - // configuration was already created in the server. 
- ErrStreamSourceMultipleSubjectTransformsNotSupported JetStreamError = &jsError{message: "stream sourceing with multiple subject transforms not supported by nats-server"} - - // ErrConsumerNotFound is an error returned when consumer with given name does not exist. - ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}} - - // ErrMsgNotFound is returned when message with provided sequence number does npt exist. - ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}} - - // ErrBadRequest is returned when invalid request is sent to JetStream API. - ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}} - - // ErrDuplicateFilterSubjects is returned when both FilterSubject and FilterSubjects are specified when creating consumer. - ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}} - - // ErrDuplicateFilterSubjects is returned when filter subjects overlap when creating consumer. - ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}} - - // ErrEmptyFilter is returned when a filter in FilterSubjects is empty. - ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}} - - // Client errors - - // ErrConsumerNameAlreadyInUse is an error returned when consumer with given name already exists. 
- ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"} - - // ErrConsumerNotActive is an error returned when consumer is not active. - ErrConsumerNotActive JetStreamError = &jsError{message: "consumer not active"} - - // ErrInvalidJSAck is returned when JetStream ack from message publish is invalid. - ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"} - - // ErrStreamConfigRequired is returned when empty stream configuration is supplied to add/update stream. - ErrStreamConfigRequired JetStreamError = &jsError{message: "stream configuration is required"} - - // ErrStreamNameRequired is returned when the provided stream name is empty. - ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"} - - // ErrConsumerNameRequired is returned when the provided consumer durable name is empty. - ErrConsumerNameRequired JetStreamError = &jsError{message: "consumer name is required"} - - // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the connected nats-server version does not support setting - // multiple filter subjects with filter_subjects field. If this error is returned when executing AddConsumer(), the consumer with invalid - // configuration was already created in the server. - ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"} - - // ErrConsumerConfigRequired is returned when empty consumer consuguration is supplied to add/update consumer. - ErrConsumerConfigRequired JetStreamError = &jsError{message: "consumer configuration is required"} - - // ErrPullSubscribeToPushConsumer is returned when attempting to use PullSubscribe on push consumer. 
- ErrPullSubscribeToPushConsumer JetStreamError = &jsError{message: "cannot pull subscribe to push based consumer"} - - // ErrPullSubscribeRequired is returned when attempting to use subscribe methods not suitable for pull consumers for pull consumers. - ErrPullSubscribeRequired JetStreamError = &jsError{message: "must use pull subscribe to bind to pull based consumer"} - - // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more than once. - ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"} - - // ErrNoStreamResponse is returned when there is no response from stream (e.g. no responders error). - ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"} - - // ErrNotJSMessage is returned when attempting to get metadata from non JetStream message . - ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"} - - // ErrInvalidStreamName is returned when the provided stream name is invalid (contains '.' or ' '). - ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"} - - // ErrInvalidConsumerName is returned when the provided consumer name is invalid (contains '.' or ' '). - ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"} - - // ErrNoMatchingStream is returned when stream lookup by subject is unsuccessful. - ErrNoMatchingStream JetStreamError = &jsError{message: "no stream matches subject"} - - // ErrSubjectMismatch is returned when the provided subject does not match consumer's filter subject. - ErrSubjectMismatch JetStreamError = &jsError{message: "subject does not match consumer"} - - // ErrContextAndTimeout is returned when attempting to use both context and timeout. - ErrContextAndTimeout JetStreamError = &jsError{message: "context and timeout can not both be set"} - - // ErrCantAckIfConsumerAckNone is returned when attempting to ack a message for consumer with AckNone policy set. 
- ErrCantAckIfConsumerAckNone JetStreamError = &jsError{message: "cannot acknowledge a message for a consumer with AckNone policy"} - - // ErrConsumerDeleted is returned when attempting to send pull request to a consumer which does not exist - ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"} - - // ErrConsumerLeadershipChanged is returned when pending requests are no longer valid after leadership has changed - ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "Leadership Changed"} - - // ErrNoHeartbeat is returned when no heartbeat is received from server when sending requests with pull consumer. - ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"} - - // DEPRECATED: ErrInvalidDurableName is no longer returned and will be removed in future releases. - // Use ErrInvalidConsumerName instead. - ErrInvalidDurableName = errors.New("nats: invalid durable name") -) - -// Error code represents JetStream error codes returned by the API -type ErrorCode uint16 - -const ( - JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039 - JSErrCodeJetStreamNotEnabled ErrorCode = 10076 - JSErrCodeInsufficientResourcesErr ErrorCode = 10023 - - JSErrCodeStreamNotFound ErrorCode = 10059 - JSErrCodeStreamNameInUse ErrorCode = 10058 - - JSErrCodeConsumerNotFound ErrorCode = 10014 - JSErrCodeConsumerNameExists ErrorCode = 10013 - JSErrCodeConsumerAlreadyExists ErrorCode = 10105 - JSErrCodeDuplicateFilterSubjects ErrorCode = 10136 - JSErrCodeOverlappingFilterSubjects ErrorCode = 10138 - JSErrCodeConsumerEmptyFilter ErrorCode = 10139 - - JSErrCodeMessageNotFound ErrorCode = 10037 - - JSErrCodeBadRequest ErrorCode = 10003 - JSStreamInvalidConfig ErrorCode = 10052 - - JSErrCodeStreamWrongLastSequence ErrorCode = 10071 -) - -// APIError is included in all API responses if there was an error. 
-type APIError struct { - Code int `json:"code"` - ErrorCode ErrorCode `json:"err_code"` - Description string `json:"description,omitempty"` -} - -// Error prints the JetStream API error code and description -func (e *APIError) Error() string { - return fmt.Sprintf("nats: %s", e.Description) -} - -// APIError implements the JetStreamError interface. -func (e *APIError) APIError() *APIError { - return e -} - -// Is matches against an APIError. -func (e *APIError) Is(err error) bool { - if e == nil { - return false - } - // Extract internal APIError to match against. - var aerr *APIError - ok := errors.As(err, &aerr) - if !ok { - return ok - } - return e.ErrorCode == aerr.ErrorCode -} - -// JetStreamError is an error result that happens when using JetStream. -// In case of client-side error, `APIError()` returns nil -type JetStreamError interface { - APIError() *APIError - error -} - -type jsError struct { - apiErr *APIError - message string -} - -func (err *jsError) APIError() *APIError { - return err.apiErr -} - -func (err *jsError) Error() string { - if err.apiErr != nil && err.apiErr.Description != "" { - return err.apiErr.Error() - } - return fmt.Sprintf("nats: %s", err.message) -} - -func (err *jsError) Unwrap() error { - // Allow matching to embedded APIError in case there is one. - if err.apiErr == nil { - return nil - } - return err.apiErr -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/jsm.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/jsm.go deleted file mode 100644 index 94fa86c..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/jsm.go +++ /dev/null @@ -1,1675 +0,0 @@ -// Copyright 2021-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// JetStreamManager manages JetStream Streams and Consumers. -type JetStreamManager interface { - // AddStream creates a stream. - AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) - - // UpdateStream updates a stream. - UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) - - // DeleteStream deletes a stream. - DeleteStream(name string, opts ...JSOpt) error - - // StreamInfo retrieves information from a stream. - StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) - - // PurgeStream purges a stream messages. - PurgeStream(name string, opts ...JSOpt) error - - // StreamsInfo can be used to retrieve a list of StreamInfo objects. - // DEPRECATED: Use Streams() instead. - StreamsInfo(opts ...JSOpt) <-chan *StreamInfo - - // Streams can be used to retrieve a list of StreamInfo objects. - Streams(opts ...JSOpt) <-chan *StreamInfo - - // StreamNames is used to retrieve a list of Stream names. - StreamNames(opts ...JSOpt) <-chan string - - // GetMsg retrieves a raw stream message stored in JetStream by sequence number. - // Use options nats.DirectGet() or nats.DirectGetNext() to trigger retrieval - // directly from a distributed group of servers (leader and replicas). - // The stream must have been created/updated with the AllowDirect boolean. - GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) - - // GetLastMsg retrieves the last raw stream message stored in JetStream by subject. 
- // Use option nats.DirectGet() to trigger retrieval - // directly from a distributed group of servers (leader and replicas). - // The stream must have been created/updated with the AllowDirect boolean. - GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) - - // DeleteMsg deletes a message from a stream. The message is marked as erased, but its value is not overwritten. - DeleteMsg(name string, seq uint64, opts ...JSOpt) error - - // SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data - // As a result, this operation is slower than DeleteMsg() - SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error - - // AddConsumer adds a consumer to a stream. - // If the consumer already exists, and the configuration is the same, it - // will return the existing consumer. - // If the consumer already exists, and the configuration is different, it - // will return ErrConsumerNameAlreadyInUse. - AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) - - // UpdateConsumer updates an existing consumer. - UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) - - // DeleteConsumer deletes a consumer. - DeleteConsumer(stream, consumer string, opts ...JSOpt) error - - // ConsumerInfo retrieves information of a consumer from a stream. - ConsumerInfo(stream, name string, opts ...JSOpt) (*ConsumerInfo, error) - - // ConsumersInfo is used to retrieve a list of ConsumerInfo objects. - // DEPRECATED: Use Consumers() instead. - ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo - - // Consumers is used to retrieve a list of ConsumerInfo objects. - Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo - - // ConsumerNames is used to retrieve a list of Consumer names. - ConsumerNames(stream string, opts ...JSOpt) <-chan string - - // AccountInfo retrieves info about the JetStream usage from an account. 
- AccountInfo(opts ...JSOpt) (*AccountInfo, error) - - // StreamNameBySubject returns a stream matching given subject. - StreamNameBySubject(string, ...JSOpt) (string, error) -} - -// StreamConfig will determine the properties for a stream. -// There are sensible defaults for most. If no subjects are -// given the name will be used as the only subject. -type StreamConfig struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - Subjects []string `json:"subjects,omitempty"` - Retention RetentionPolicy `json:"retention"` - MaxConsumers int `json:"max_consumers"` - MaxMsgs int64 `json:"max_msgs"` - MaxBytes int64 `json:"max_bytes"` - Discard DiscardPolicy `json:"discard"` - DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"` - MaxAge time.Duration `json:"max_age"` - MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"` - MaxMsgSize int32 `json:"max_msg_size,omitempty"` - Storage StorageType `json:"storage"` - Replicas int `json:"num_replicas"` - NoAck bool `json:"no_ack,omitempty"` - Template string `json:"template_owner,omitempty"` - Duplicates time.Duration `json:"duplicate_window,omitempty"` - Placement *Placement `json:"placement,omitempty"` - Mirror *StreamSource `json:"mirror,omitempty"` - Sources []*StreamSource `json:"sources,omitempty"` - Sealed bool `json:"sealed,omitempty"` - DenyDelete bool `json:"deny_delete,omitempty"` - DenyPurge bool `json:"deny_purge,omitempty"` - AllowRollup bool `json:"allow_rollup_hdrs,omitempty"` - Compression StoreCompression `json:"compression"` - FirstSeq uint64 `json:"first_seq,omitempty"` - - // Allow applying a subject transform to incoming messages before doing anything else. - SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"` - - // Allow republish of the message after being sequenced and stored. - RePublish *RePublish `json:"republish,omitempty"` - - // Allow higher performance, direct access to get individual messages. E.g. 
KeyValue - AllowDirect bool `json:"allow_direct"` - // Allow higher performance and unified direct access for mirrors as well. - MirrorDirect bool `json:"mirror_direct"` - - // Limits for consumers on this stream. - ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"` - - // Metadata is additional metadata for the Stream. - // Keys starting with `_nats` are reserved. - // NOTE: Metadata requires nats-server v2.10.0+ - Metadata map[string]string `json:"metadata,omitempty"` -} - -// SubjectTransformConfig is for applying a subject transform (to matching messages) before doing anything else when a new message is received. -type SubjectTransformConfig struct { - Source string `json:"src,omitempty"` - Destination string `json:"dest"` -} - -// RePublish is for republishing messages once committed to a stream. The original -// subject cis remapped from the subject pattern to the destination pattern. -type RePublish struct { - Source string `json:"src,omitempty"` - Destination string `json:"dest"` - HeadersOnly bool `json:"headers_only,omitempty"` -} - -// Placement is used to guide placement of streams in clustered JetStream. -type Placement struct { - Cluster string `json:"cluster"` - Tags []string `json:"tags,omitempty"` -} - -// StreamSource dictates how streams can source from other streams. -type StreamSource struct { - Name string `json:"name"` - OptStartSeq uint64 `json:"opt_start_seq,omitempty"` - OptStartTime *time.Time `json:"opt_start_time,omitempty"` - FilterSubject string `json:"filter_subject,omitempty"` - SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` - External *ExternalStream `json:"external,omitempty"` - Domain string `json:"-"` -} - -// ExternalStream allows you to qualify access to a stream source in another -// account. 
-type ExternalStream struct { - APIPrefix string `json:"api"` - DeliverPrefix string `json:"deliver,omitempty"` -} - -// StreamConsumerLimits are the limits for a consumer on a stream. -// These can be overridden on a per consumer basis. -type StreamConsumerLimits struct { - InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` - MaxAckPending int `json:"max_ack_pending,omitempty"` -} - -// Helper for copying when we do not want to change user's version. -func (ss *StreamSource) copy() *StreamSource { - nss := *ss - // Check pointers - if ss.OptStartTime != nil { - t := *ss.OptStartTime - nss.OptStartTime = &t - } - if ss.External != nil { - ext := *ss.External - nss.External = &ext - } - return &nss -} - -// If we have a Domain, convert to the appropriate ext.APIPrefix. -// This will change the stream source, so should be a copy passed in. -func (ss *StreamSource) convertDomain() error { - if ss.Domain == _EMPTY_ { - return nil - } - if ss.External != nil { - // These should be mutually exclusive. - // TODO(dlc) - Make generic? - return errors.New("nats: domain and external are both set") - } - ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)} - return nil -} - -// apiResponse is a standard response from the JetStream JSON API -type apiResponse struct { - Type string `json:"type"` - Error *APIError `json:"error,omitempty"` -} - -// apiPaged includes variables used to create paged responses from the JSON API -type apiPaged struct { - Total int `json:"total"` - Offset int `json:"offset"` - Limit int `json:"limit"` -} - -// apiPagedRequest includes parameters allowing specific pages to be requested -// from APIs responding with apiPaged. -type apiPagedRequest struct { - Offset int `json:"offset,omitempty"` -} - -// AccountInfo contains info about the JetStream usage from the current account. 
-type AccountInfo struct { - Tier - Domain string `json:"domain"` - API APIStats `json:"api"` - Tiers map[string]Tier `json:"tiers"` -} - -type Tier struct { - Memory uint64 `json:"memory"` - Store uint64 `json:"storage"` - ReservedMemory uint64 `json:"reserved_memory"` - ReservedStore uint64 `json:"reserved_storage"` - Streams int `json:"streams"` - Consumers int `json:"consumers"` - Limits AccountLimits `json:"limits"` -} - -// APIStats reports on API calls to JetStream for this account. -type APIStats struct { - Total uint64 `json:"total"` - Errors uint64 `json:"errors"` -} - -// AccountLimits includes the JetStream limits of the current account. -type AccountLimits struct { - MaxMemory int64 `json:"max_memory"` - MaxStore int64 `json:"max_storage"` - MaxStreams int `json:"max_streams"` - MaxConsumers int `json:"max_consumers"` - MaxAckPending int `json:"max_ack_pending"` - MemoryMaxStreamBytes int64 `json:"memory_max_stream_bytes"` - StoreMaxStreamBytes int64 `json:"storage_max_stream_bytes"` - MaxBytesRequired bool `json:"max_bytes_required"` -} - -type accountInfoResponse struct { - apiResponse - AccountInfo -} - -// AccountInfo retrieves info about the JetStream usage from the current account. -// If JetStream is not enabled, this will return ErrJetStreamNotEnabled -// Other errors can happen but are generally considered retryable -func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) { - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(apiAccountInfo), nil) - if err != nil { - // todo maybe nats server should never have no responder on this subject and always respond if they know there is no js to be had - if errors.Is(err, ErrNoResponders) { - err = ErrJetStreamNotEnabled - } - return nil, err - } - var info accountInfoResponse - if err := json.Unmarshal(resp.Data, &info); err != nil { - return nil, err - } - if info.Error != nil { - // Internally checks based on error code instead of description match. - if errors.Is(info.Error, ErrJetStreamNotEnabledForAccount) { - return nil, ErrJetStreamNotEnabledForAccount - } - return nil, info.Error - } - - return &info.AccountInfo, nil -} - -type createConsumerRequest struct { - Stream string `json:"stream_name"` - Config *ConsumerConfig `json:"config"` -} - -type consumerResponse struct { - apiResponse - *ConsumerInfo -} - -// AddConsumer adds a consumer to a stream. -// If the consumer already exists, and the configuration is the same, it -// will return the existing consumer. -// If the consumer already exists, and the configuration is different, it -// will return ErrConsumerNameAlreadyInUse. -func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { - if cfg == nil { - cfg = &ConsumerConfig{} - } - consumerName := cfg.Name - if consumerName == _EMPTY_ { - consumerName = cfg.Durable - } - if consumerName != _EMPTY_ { - consInfo, err := js.ConsumerInfo(stream, consumerName, opts...) 
- if err != nil && !errors.Is(err, ErrConsumerNotFound) && !errors.Is(err, ErrStreamNotFound) { - return nil, err - } - - if consInfo != nil { - sameConfig := checkConfig(&consInfo.Config, cfg) - if sameConfig != nil { - return nil, fmt.Errorf("%w: creating consumer %q on stream %q", ErrConsumerNameAlreadyInUse, consumerName, stream) - } else { - return consInfo, nil - } - } - } - - return js.upsertConsumer(stream, consumerName, cfg, opts...) -} - -func (js *js) UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { - if cfg == nil { - return nil, ErrConsumerConfigRequired - } - consumerName := cfg.Name - if consumerName == _EMPTY_ { - consumerName = cfg.Durable - } - if consumerName == _EMPTY_ { - return nil, ErrConsumerNameRequired - } - return js.upsertConsumer(stream, consumerName, cfg, opts...) -} - -func (js *js) upsertConsumer(stream, consumerName string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) { - if err := checkStreamName(stream); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - req, err := json.Marshal(&createConsumerRequest{Stream: stream, Config: cfg}) - if err != nil { - return nil, err - } - - var ccSubj string - if consumerName == _EMPTY_ { - // if consumer name is empty (neither Durable nor Name is set), use the legacy ephemeral endpoint - ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) - } else if err := checkConsumerName(consumerName); err != nil { - return nil, err - } else if js.nc.serverMinVersion(2, 9, 0) { - if cfg.Durable != "" && js.opts.featureFlags.useDurableConsumerCreate { - // if user set the useDurableConsumerCreate flag, use the legacy DURABLE.CREATE endpoint - ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) - } else if cfg.FilterSubject == _EMPTY_ || cfg.FilterSubject == ">" { - // if filter subject is empty or ">", use the endpoint without filter subject - ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName) - } else { - // if filter subject is not empty, use the endpoint with filter subject - ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject) - } - } else { - if cfg.Durable != "" { - // if Durable is set, use the DURABLE.CREATE endpoint - ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName) - } else { - // if Durable is not set, use the legacy ephemeral endpoint - ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream) - } - } - - resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(ccSubj), req) - if err != nil { - if errors.Is(err, ErrNoResponders) { - err = ErrJetStreamNotEnabled - } - return nil, err - } - var info consumerResponse - err = json.Unmarshal(resp.Data, &info) - if err != nil { - return nil, err - } - if info.Error != nil { - if errors.Is(info.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - if errors.Is(info.Error, ErrConsumerNotFound) { - return nil, ErrConsumerNotFound - } - return nil, info.Error - } - - 
// check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo - if len(cfg.FilterSubjects) != 0 && len(info.Config.FilterSubjects) == 0 { - return nil, ErrConsumerMultipleFilterSubjectsNotSupported - } - return info.ConsumerInfo, nil -} - -// consumerDeleteResponse is the response for a Consumer delete request. -type consumerDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` -} - -func checkStreamName(stream string) error { - if stream == _EMPTY_ { - return ErrStreamNameRequired - } - if strings.ContainsAny(stream, ". ") { - return ErrInvalidStreamName - } - return nil -} - -// Check that the consumer name is not empty and is valid (does not contain "." and " "). -// Additional consumer name validation is done in nats-server. -// Returns ErrConsumerNameRequired if consumer name is empty, ErrInvalidConsumerName is invalid, otherwise nil -func checkConsumerName(consumer string) error { - if consumer == _EMPTY_ { - return ErrConsumerNameRequired - } - if strings.ContainsAny(consumer, ". ") { - return ErrInvalidConsumerName - } - return nil -} - -// DeleteConsumer deletes a Consumer. -func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error { - if err := checkStreamName(stream); err != nil { - return err - } - if err := checkConsumerName(consumer); err != nil { - return err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - dcSubj := js.apiSubj(fmt.Sprintf(apiConsumerDeleteT, stream, consumer)) - r, err := js.apiRequestWithContext(o.ctx, dcSubj, nil) - if err != nil { - return err - } - var resp consumerDeleteResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - - if resp.Error != nil { - if errors.Is(resp.Error, ErrConsumerNotFound) { - return ErrConsumerNotFound - } - return resp.Error - } - return nil -} - -// ConsumerInfo returns information about a Consumer. 
-func (js *js) ConsumerInfo(stream, consumer string, opts ...JSOpt) (*ConsumerInfo, error) { - if err := checkStreamName(stream); err != nil { - return nil, err - } - if err := checkConsumerName(consumer); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - return js.getConsumerInfoContext(o.ctx, stream, consumer) -} - -// consumerLister fetches pages of ConsumerInfo objects. This object is not -// safe to use for multiple threads. -type consumerLister struct { - stream string - js *js - - err error - offset int - page []*ConsumerInfo - pageInfo *apiPaged -} - -// consumersRequest is the type used for Consumers requests. -type consumersRequest struct { - apiPagedRequest -} - -// consumerListResponse is the response for a Consumers List request. -type consumerListResponse struct { - apiResponse - apiPaged - Consumers []*ConsumerInfo `json:"consumers"` -} - -// Next fetches the next ConsumerInfo page. 
-func (c *consumerLister) Next() bool { - if c.err != nil { - return false - } - if err := checkStreamName(c.stream); err != nil { - c.err = err - return false - } - if c.pageInfo != nil && c.offset >= c.pageInfo.Total { - return false - } - - req, err := json.Marshal(consumersRequest{ - apiPagedRequest: apiPagedRequest{Offset: c.offset}, - }) - if err != nil { - c.err = err - return false - } - - var cancel context.CancelFunc - ctx := c.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) - defer cancel() - } - - clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerListT, c.stream)) - r, err := c.js.apiRequestWithContext(ctx, clSubj, req) - if err != nil { - c.err = err - return false - } - var resp consumerListResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - c.err = err - return false - } - if resp.Error != nil { - c.err = resp.Error - return false - } - - c.pageInfo = &resp.apiPaged - c.page = resp.Consumers - c.offset += len(c.page) - return true -} - -// Page returns the current ConsumerInfo page. -func (c *consumerLister) Page() []*ConsumerInfo { - return c.page -} - -// Err returns any errors found while fetching pages. -func (c *consumerLister) Err() error { - return c.err -} - -// Consumers is used to retrieve a list of ConsumerInfo objects. -func (jsc *js) Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return nil - } - - ch := make(chan *ConsumerInfo) - l := &consumerLister{js: &js{nc: jsc.nc, opts: o}, stream: stream} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// ConsumersInfo is used to retrieve a list of ConsumerInfo objects. -// DEPRECATED: Use Consumers() instead. 
-func (jsc *js) ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo { - return jsc.Consumers(stream, opts...) -} - -type consumerNamesLister struct { - stream string - js *js - - err error - offset int - page []string - pageInfo *apiPaged -} - -// consumerNamesListResponse is the response for a Consumers Names List request. -type consumerNamesListResponse struct { - apiResponse - apiPaged - Consumers []string `json:"consumers"` -} - -// Next fetches the next consumer names page. -func (c *consumerNamesLister) Next() bool { - if c.err != nil { - return false - } - if err := checkStreamName(c.stream); err != nil { - c.err = err - return false - } - if c.pageInfo != nil && c.offset >= c.pageInfo.Total { - return false - } - - var cancel context.CancelFunc - ctx := c.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) - defer cancel() - } - - req, err := json.Marshal(consumersRequest{ - apiPagedRequest: apiPagedRequest{Offset: c.offset}, - }) - if err != nil { - c.err = err - return false - } - clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerNamesT, c.stream)) - r, err := c.js.apiRequestWithContext(ctx, clSubj, req) - if err != nil { - c.err = err - return false - } - var resp consumerNamesListResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - c.err = err - return false - } - if resp.Error != nil { - c.err = resp.Error - return false - } - - c.pageInfo = &resp.apiPaged - c.page = resp.Consumers - c.offset += len(c.page) - return true -} - -// Page returns the current ConsumerInfo page. -func (c *consumerNamesLister) Page() []string { - return c.page -} - -// Err returns any errors found while fetching pages. -func (c *consumerNamesLister) Err() error { - return c.err -} - -// ConsumerNames is used to retrieve a list of Consumer names. -func (jsc *js) ConsumerNames(stream string, opts ...JSOpt) <-chan string { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) 
- if err != nil { - return nil - } - - ch := make(chan string) - l := &consumerNamesLister{stream: stream, js: &js{nc: jsc.nc, opts: o}} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// streamCreateResponse stream creation. -type streamCreateResponse struct { - apiResponse - *StreamInfo -} - -func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { - if cfg == nil { - return nil, ErrStreamConfigRequired - } - if err := checkStreamName(cfg.Name); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - // In case we need to change anything, copy so we do not change the caller's version. - ncfg := *cfg - - // If we have a mirror and an external domain, convert to ext.APIPrefix. - if cfg.Mirror != nil && cfg.Mirror.Domain != _EMPTY_ { - // Copy so we do not change the caller's version. - ncfg.Mirror = ncfg.Mirror.copy() - if err := ncfg.Mirror.convertDomain(); err != nil { - return nil, err - } - } - // Check sources for the same. - if len(ncfg.Sources) > 0 { - ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...) 
- for i, ss := range ncfg.Sources { - if ss.Domain != _EMPTY_ { - ncfg.Sources[i] = ss.copy() - if err := ncfg.Sources[i].convertDomain(); err != nil { - return nil, err - } - } - } - } - - req, err := json.Marshal(&ncfg) - if err != nil { - return nil, err - } - - csSubj := js.apiSubj(fmt.Sprintf(apiStreamCreateT, cfg.Name)) - r, err := js.apiRequestWithContext(o.ctx, csSubj, req) - if err != nil { - return nil, err - } - var resp streamCreateResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNameAlreadyInUse) { - return nil, ErrStreamNameAlreadyInUse - } - return nil, resp.Error - } - - // check that input subject transform (if used) is reflected in the returned ConsumerInfo - if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { - return nil, ErrStreamSubjectTransformNotSupported - } - if len(cfg.Sources) != 0 { - if len(cfg.Sources) != len(resp.Config.Sources) { - return nil, ErrStreamSourceNotSupported - } - for i := range cfg.Sources { - if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { - return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported - } - } - } - - return resp.StreamInfo, nil -} - -type ( - // StreamInfoRequest contains additional option to return - StreamInfoRequest struct { - apiPagedRequest - // DeletedDetails when true includes information about deleted messages - DeletedDetails bool `json:"deleted_details,omitempty"` - // SubjectsFilter when set, returns information on the matched subjects - SubjectsFilter string `json:"subjects_filter,omitempty"` - } - streamInfoResponse = struct { - apiResponse - apiPaged - *StreamInfo - } -) - -func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) { - if err := checkStreamName(stream); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - var i int - var subjectMessagesMap map[string]uint64 - var req []byte - var requestPayload bool - - var siOpts StreamInfoRequest - if o.streamInfoOpts != nil { - requestPayload = true - siOpts = *o.streamInfoOpts - } - - for { - if requestPayload { - siOpts.Offset = i - if req, err = json.Marshal(&siOpts); err != nil { - return nil, err - } - } - - siSubj := js.apiSubj(fmt.Sprintf(apiStreamInfoT, stream)) - - r, err := js.apiRequestWithContext(o.ctx, siSubj, req) - if err != nil { - return nil, err - } - - var resp streamInfoResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - var total int - // for backwards compatibility - if resp.Total != 0 { - total = resp.Total - } else { - total = len(resp.State.Subjects) - } - - if requestPayload && len(resp.StreamInfo.State.Subjects) > 0 { - if subjectMessagesMap == nil { - subjectMessagesMap = make(map[string]uint64, total) - } - - for k, j := range resp.State.Subjects { - subjectMessagesMap[k] = j - i++ - } - } - - if i >= total { - if requestPayload { - resp.StreamInfo.State.Subjects = subjectMessagesMap - } - return resp.StreamInfo, nil - } - } -} - -// StreamInfo shows config and current state for this stream. -type StreamInfo struct { - Config StreamConfig `json:"config"` - Created time.Time `json:"created"` - State StreamState `json:"state"` - Cluster *ClusterInfo `json:"cluster,omitempty"` - Mirror *StreamSourceInfo `json:"mirror,omitempty"` - Sources []*StreamSourceInfo `json:"sources,omitempty"` - Alternates []*StreamAlternate `json:"alternates,omitempty"` -} - -// StreamAlternate is an alternate stream represented by a mirror. 
-type StreamAlternate struct { - Name string `json:"name"` - Domain string `json:"domain,omitempty"` - Cluster string `json:"cluster"` -} - -// StreamSourceInfo shows information about an upstream stream source. -type StreamSourceInfo struct { - Name string `json:"name"` - Lag uint64 `json:"lag"` - Active time.Duration `json:"active"` - External *ExternalStream `json:"external"` - Error *APIError `json:"error"` - FilterSubject string `json:"filter_subject,omitempty"` - SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` -} - -// StreamState is information about the given stream. -type StreamState struct { - Msgs uint64 `json:"messages"` - Bytes uint64 `json:"bytes"` - FirstSeq uint64 `json:"first_seq"` - FirstTime time.Time `json:"first_ts"` - LastSeq uint64 `json:"last_seq"` - LastTime time.Time `json:"last_ts"` - Consumers int `json:"consumer_count"` - Deleted []uint64 `json:"deleted"` - NumDeleted int `json:"num_deleted"` - NumSubjects uint64 `json:"num_subjects"` - Subjects map[string]uint64 `json:"subjects"` -} - -// ClusterInfo shows information about the underlying set of servers -// that make up the stream or consumer. -type ClusterInfo struct { - Name string `json:"name,omitempty"` - Leader string `json:"leader,omitempty"` - Replicas []*PeerInfo `json:"replicas,omitempty"` -} - -// PeerInfo shows information about all the peers in the cluster that -// are supporting the stream or consumer. -type PeerInfo struct { - Name string `json:"name"` - Current bool `json:"current"` - Offline bool `json:"offline,omitempty"` - Active time.Duration `json:"active"` - Lag uint64 `json:"lag,omitempty"` -} - -// UpdateStream updates a Stream. -func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { - if cfg == nil { - return nil, ErrStreamConfigRequired - } - if err := checkStreamName(cfg.Name); err != nil { - return nil, err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - req, err := json.Marshal(cfg) - if err != nil { - return nil, err - } - - usSubj := js.apiSubj(fmt.Sprintf(apiStreamUpdateT, cfg.Name)) - r, err := js.apiRequestWithContext(o.ctx, usSubj, req) - if err != nil { - return nil, err - } - var resp streamInfoResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - // check that input subject transform (if used) is reflected in the returned StreamInfo - if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { - return nil, ErrStreamSubjectTransformNotSupported - } - - if len(cfg.Sources) != 0 { - if len(cfg.Sources) != len(resp.Config.Sources) { - return nil, ErrStreamSourceNotSupported - } - for i := range cfg.Sources { - if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { - return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported - } - } - } - - return resp.StreamInfo, nil -} - -// streamDeleteResponse is the response for a Stream delete request. -type streamDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` -} - -// DeleteStream deletes a Stream. -func (js *js) DeleteStream(name string, opts ...JSOpt) error { - if err := checkStreamName(name); err != nil { - return err - } - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - dsSubj := js.apiSubj(fmt.Sprintf(apiStreamDeleteT, name)) - r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) - if err != nil { - return err - } - var resp streamDeleteResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - - if resp.Error != nil { - if errors.Is(resp.Error, ErrStreamNotFound) { - return ErrStreamNotFound - } - return resp.Error - } - return nil -} - -type apiMsgGetRequest struct { - Seq uint64 `json:"seq,omitempty"` - LastFor string `json:"last_by_subj,omitempty"` - NextFor string `json:"next_by_subj,omitempty"` -} - -// RawStreamMsg is a raw message stored in JetStream. -type RawStreamMsg struct { - Subject string - Sequence uint64 - Header Header - Data []byte - Time time.Time -} - -// storedMsg is a raw message stored in JetStream. -type storedMsg struct { - Subject string `json:"subject"` - Sequence uint64 `json:"seq"` - Header []byte `json:"hdrs,omitempty"` - Data []byte `json:"data,omitempty"` - Time time.Time `json:"time"` -} - -// apiMsgGetResponse is the response for a Stream get request. -type apiMsgGetResponse struct { - apiResponse - Message *storedMsg `json:"message,omitempty"` -} - -// GetLastMsg retrieves the last raw stream message stored in JetStream by subject. -func (js *js) GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) { - return js.getMsg(name, &apiMsgGetRequest{LastFor: subject}, opts...) -} - -// GetMsg retrieves a raw stream message stored in JetStream by sequence number. -func (js *js) GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) { - return js.getMsg(name, &apiMsgGetRequest{Seq: seq}, opts...) -} - -// Low level getMsg -func (js *js) getMsg(name string, mreq *apiMsgGetRequest, opts ...JSOpt) (*RawStreamMsg, error) { - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return nil, err - } - if cancel != nil { - defer cancel() - } - - if err := checkStreamName(name); err != nil { - return nil, err - } - - var apiSubj string - if o.directGet && mreq.LastFor != _EMPTY_ { - apiSubj = apiDirectMsgGetLastBySubjectT - dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name, mreq.LastFor)) - r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) - if err != nil { - return nil, err - } - return convertDirectGetMsgResponseToMsg(name, r) - } - - if o.directGet { - apiSubj = apiDirectMsgGetT - mreq.NextFor = o.directNextFor - } else { - apiSubj = apiMsgGetT - } - - req, err := json.Marshal(mreq) - if err != nil { - return nil, err - } - - dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name)) - r, err := js.apiRequestWithContext(o.ctx, dsSubj, req) - if err != nil { - return nil, err - } - - if o.directGet { - return convertDirectGetMsgResponseToMsg(name, r) - } - - var resp apiMsgGetResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return nil, err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrMsgNotFound) { - return nil, ErrMsgNotFound - } - if errors.Is(resp.Error, ErrStreamNotFound) { - return nil, ErrStreamNotFound - } - return nil, resp.Error - } - - msg := resp.Message - - var hdr Header - if len(msg.Header) > 0 { - hdr, err = DecodeHeadersMsg(msg.Header) - if err != nil { - return nil, err - } - } - - return &RawStreamMsg{ - Subject: msg.Subject, - Sequence: msg.Sequence, - Header: hdr, - Data: msg.Data, - Time: msg.Time, - }, nil -} - -func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error) { - // Check for 404/408. 
We would get a no-payload message and a "Status" header - if len(r.Data) == 0 { - val := r.Header.Get(statusHdr) - if val != _EMPTY_ { - switch val { - case noMessagesSts: - return nil, ErrMsgNotFound - default: - desc := r.Header.Get(descrHdr) - if desc == _EMPTY_ { - desc = "unable to get message" - } - return nil, fmt.Errorf("nats: %s", desc) - } - } - } - // Check for headers that give us the required information to - // reconstruct the message. - if len(r.Header) == 0 { - return nil, fmt.Errorf("nats: response should have headers") - } - stream := r.Header.Get(JSStream) - if stream == _EMPTY_ { - return nil, fmt.Errorf("nats: missing stream header") - } - - // Mirrors can now answer direct gets, so removing check for name equality. - // TODO(dlc) - We could have server also have a header with origin and check that? - - seqStr := r.Header.Get(JSSequence) - if seqStr == _EMPTY_ { - return nil, fmt.Errorf("nats: missing sequence header") - } - seq, err := strconv.ParseUint(seqStr, 10, 64) - if err != nil { - return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err) - } - timeStr := r.Header.Get(JSTimeStamp) - if timeStr == _EMPTY_ { - return nil, fmt.Errorf("nats: missing timestamp header") - } - // Temporary code: the server in main branch is sending with format - // "2006-01-02 15:04:05.999999999 +0000 UTC", but will be changed - // to use format RFC3339Nano. Because of server test deps/cycle, - // support both until the server PR lands. 
- tm, err := time.Parse(time.RFC3339Nano, timeStr) - if err != nil { - tm, err = time.Parse("2006-01-02 15:04:05.999999999 +0000 UTC", timeStr) - if err != nil { - return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err) - } - } - subj := r.Header.Get(JSSubject) - if subj == _EMPTY_ { - return nil, fmt.Errorf("nats: missing subject header") - } - return &RawStreamMsg{ - Subject: subj, - Sequence: seq, - Header: r.Header, - Data: r.Data, - Time: tm, - }, nil -} - -type msgDeleteRequest struct { - Seq uint64 `json:"seq"` - NoErase bool `json:"no_erase,omitempty"` -} - -// msgDeleteResponse is the response for a Stream delete request. -type msgDeleteResponse struct { - apiResponse - Success bool `json:"success,omitempty"` -} - -// DeleteMsg deletes a message from a stream. -// The message is marked as erased, but not overwritten -func (js *js) DeleteMsg(name string, seq uint64, opts ...JSOpt) error { - o, cancel, err := getJSContextOpts(js.opts, opts...) - if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq, NoErase: true}) -} - -// SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data -// As a result, this operation is slower than DeleteMsg() -func (js *js) SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error { - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq}) -} - -func (js *js) deleteMsg(ctx context.Context, stream string, req *msgDeleteRequest) error { - if err := checkStreamName(stream); err != nil { - return err - } - reqJSON, err := json.Marshal(req) - if err != nil { - return err - } - - dsSubj := js.apiSubj(fmt.Sprintf(apiMsgDeleteT, stream)) - r, err := js.apiRequestWithContext(ctx, dsSubj, reqJSON) - if err != nil { - return err - } - var resp msgDeleteResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - if resp.Error != nil { - return resp.Error - } - return nil -} - -// StreamPurgeRequest is optional request information to the purge API. -type StreamPurgeRequest struct { - // Purge up to but not including sequence. - Sequence uint64 `json:"seq,omitempty"` - // Subject to match against messages for the purge command. - Subject string `json:"filter,omitempty"` - // Number of messages to keep. - Keep uint64 `json:"keep,omitempty"` -} - -type streamPurgeResponse struct { - apiResponse - Success bool `json:"success,omitempty"` - Purged uint64 `json:"purged"` -} - -// PurgeStream purges messages on a Stream. -func (js *js) PurgeStream(stream string, opts ...JSOpt) error { - if err := checkStreamName(stream); err != nil { - return err - } - var req *StreamPurgeRequest - var ok bool - for _, opt := range opts { - // For PurgeStream, only request body opt is relevant - if req, ok = opt.(*StreamPurgeRequest); ok { - break - } - } - return js.purgeStream(stream, req) -} - -func (js *js) purgeStream(stream string, req *StreamPurgeRequest, opts ...JSOpt) error { - o, cancel, err := getJSContextOpts(js.opts, opts...) 
- if err != nil { - return err - } - if cancel != nil { - defer cancel() - } - - var b []byte - if req != nil { - if b, err = json.Marshal(req); err != nil { - return err - } - } - - psSubj := js.apiSubj(fmt.Sprintf(apiStreamPurgeT, stream)) - r, err := js.apiRequestWithContext(o.ctx, psSubj, b) - if err != nil { - return err - } - var resp streamPurgeResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - return err - } - if resp.Error != nil { - if errors.Is(resp.Error, ErrBadRequest) { - return fmt.Errorf("%w: %s", ErrBadRequest, "invalid purge request body") - } - return resp.Error - } - return nil -} - -// streamLister fetches pages of StreamInfo objects. This object is not safe -// to use for multiple threads. -type streamLister struct { - js *js - page []*StreamInfo - err error - - offset int - pageInfo *apiPaged -} - -// streamListResponse list of detailed stream information. -// A nil request is valid and means all streams. -type streamListResponse struct { - apiResponse - apiPaged - Streams []*StreamInfo `json:"streams"` -} - -// streamNamesRequest is used for Stream Name requests. -type streamNamesRequest struct { - apiPagedRequest - // These are filters that can be applied to the list. - Subject string `json:"subject,omitempty"` -} - -// Next fetches the next StreamInfo page. 
-func (s *streamLister) Next() bool { - if s.err != nil { - return false - } - if s.pageInfo != nil && s.offset >= s.pageInfo.Total { - return false - } - - req, err := json.Marshal(streamNamesRequest{ - apiPagedRequest: apiPagedRequest{Offset: s.offset}, - Subject: s.js.opts.streamListSubject, - }) - if err != nil { - s.err = err - return false - } - - var cancel context.CancelFunc - ctx := s.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), s.js.opts.wait) - defer cancel() - } - - slSubj := s.js.apiSubj(apiStreamListT) - r, err := s.js.apiRequestWithContext(ctx, slSubj, req) - if err != nil { - s.err = err - return false - } - var resp streamListResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - s.err = err - return false - } - if resp.Error != nil { - s.err = resp.Error - return false - } - - s.pageInfo = &resp.apiPaged - s.page = resp.Streams - s.offset += len(s.page) - return true -} - -// Page returns the current StreamInfo page. -func (s *streamLister) Page() []*StreamInfo { - return s.page -} - -// Err returns any errors found while fetching pages. -func (s *streamLister) Err() error { - return s.err -} - -// Streams can be used to retrieve a list of StreamInfo objects. -func (jsc *js) Streams(opts ...JSOpt) <-chan *StreamInfo { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return nil - } - - ch := make(chan *StreamInfo) - l := &streamLister{js: &js{nc: jsc.nc, opts: o}} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// StreamsInfo can be used to retrieve a list of StreamInfo objects. -// DEPRECATED: Use Streams() instead. -func (jsc *js) StreamsInfo(opts ...JSOpt) <-chan *StreamInfo { - return jsc.Streams(opts...) 
-} - -type streamNamesLister struct { - js *js - - err error - offset int - page []string - pageInfo *apiPaged -} - -// Next fetches the next stream names page. -func (l *streamNamesLister) Next() bool { - if l.err != nil { - return false - } - if l.pageInfo != nil && l.offset >= l.pageInfo.Total { - return false - } - - var cancel context.CancelFunc - ctx := l.js.opts.ctx - if ctx == nil { - ctx, cancel = context.WithTimeout(context.Background(), l.js.opts.wait) - defer cancel() - } - - req, err := json.Marshal(streamNamesRequest{ - apiPagedRequest: apiPagedRequest{Offset: l.offset}, - Subject: l.js.opts.streamListSubject, - }) - if err != nil { - l.err = err - return false - } - r, err := l.js.apiRequestWithContext(ctx, l.js.apiSubj(apiStreams), req) - if err != nil { - l.err = err - return false - } - var resp streamNamesResponse - if err := json.Unmarshal(r.Data, &resp); err != nil { - l.err = err - return false - } - if resp.Error != nil { - l.err = resp.Error - return false - } - - l.pageInfo = &resp.apiPaged - l.page = resp.Streams - l.offset += len(l.page) - return true -} - -// Page returns the current ConsumerInfo page. -func (l *streamNamesLister) Page() []string { - return l.page -} - -// Err returns any errors found while fetching pages. -func (l *streamNamesLister) Err() error { - return l.err -} - -// StreamNames is used to retrieve a list of Stream names. -func (jsc *js) StreamNames(opts ...JSOpt) <-chan string { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return nil - } - - ch := make(chan string) - l := &streamNamesLister{js: &js{nc: jsc.nc, opts: o}} - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - select { - case ch <- info: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// StreamNameBySubject returns a stream name that matches the subject. 
-func (jsc *js) StreamNameBySubject(subj string, opts ...JSOpt) (string, error) { - o, cancel, err := getJSContextOpts(jsc.opts, opts...) - if err != nil { - return "", err - } - if cancel != nil { - defer cancel() - } - - var slr streamNamesResponse - req := &streamRequest{subj} - j, err := json.Marshal(req) - if err != nil { - return _EMPTY_, err - } - - resp, err := jsc.apiRequestWithContext(o.ctx, jsc.apiSubj(apiStreams), j) - if err != nil { - if errors.Is(err, ErrNoResponders) { - err = ErrJetStreamNotEnabled - } - return _EMPTY_, err - } - if err := json.Unmarshal(resp.Data, &slr); err != nil { - return _EMPTY_, err - } - - if slr.Error != nil || len(slr.Streams) != 1 { - return _EMPTY_, ErrNoMatchingStream - } - return slr.Streams[0], nil -} - -func getJSContextOpts(defs *jsOpts, opts ...JSOpt) (*jsOpts, context.CancelFunc, error) { - var o jsOpts - for _, opt := range opts { - if err := opt.configureJSContext(&o); err != nil { - return nil, nil, err - } - } - - // Check for option collisions. Right now just timeout and context. - if o.ctx != nil && o.wait != 0 { - return nil, nil, ErrContextAndTimeout - } - if o.wait == 0 && o.ctx == nil { - o.wait = defs.wait - } - var cancel context.CancelFunc - if o.ctx == nil && o.wait > 0 { - o.ctx, cancel = context.WithTimeout(context.Background(), o.wait) - } - if o.pre == _EMPTY_ { - o.pre = defs.pre - } - - return &o, cancel, nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/kv.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/kv.go deleted file mode 100644 index 0864f30..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/kv.go +++ /dev/null @@ -1,1178 +0,0 @@ -// Copyright 2021-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "context" - "errors" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go/internal/parser" -) - -// KeyValueManager is used to manage KeyValue stores. -type KeyValueManager interface { - // KeyValue will lookup and bind to an existing KeyValue store. - KeyValue(bucket string) (KeyValue, error) - // CreateKeyValue will create a KeyValue store with the following configuration. - CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) - // DeleteKeyValue will delete this KeyValue store (JetStream stream). - DeleteKeyValue(bucket string) error - // KeyValueStoreNames is used to retrieve a list of key value store names - KeyValueStoreNames() <-chan string - // KeyValueStores is used to retrieve a list of key value store statuses - KeyValueStores() <-chan KeyValueStatus -} - -// KeyValue contains methods to operate on a KeyValue store. -type KeyValue interface { - // Get returns the latest value for the key. - Get(key string) (entry KeyValueEntry, err error) - // GetRevision returns a specific revision value for the key. - GetRevision(key string, revision uint64) (entry KeyValueEntry, err error) - // Put will place the new value for the key into the store. - Put(key string, value []byte) (revision uint64, err error) - // PutString will place the string for the key into the store. - PutString(key string, value string) (revision uint64, err error) - // Create will add the key/value pair iff it does not exist. 
- Create(key string, value []byte) (revision uint64, err error) - // Update will update the value iff the latest revision matches. - Update(key string, value []byte, last uint64) (revision uint64, err error) - // Delete will place a delete marker and leave all revisions. - Delete(key string, opts ...DeleteOpt) error - // Purge will place a delete marker and remove all previous revisions. - Purge(key string, opts ...DeleteOpt) error - // Watch for any updates to keys that match the keys argument which could include wildcards. - // Watch will send a nil entry when it has received all initial values. - Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) - // WatchAll will invoke the callback for all updates. - WatchAll(opts ...WatchOpt) (KeyWatcher, error) - // Keys will return all keys. - // DEPRECATED: Use ListKeys instead to avoid memory issues. - Keys(opts ...WatchOpt) ([]string, error) - // ListKeys will return all keys in a channel. - ListKeys(opts ...WatchOpt) (KeyLister, error) - // History will return all historical values for the key. - History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) - // Bucket returns the current bucket name. - Bucket() string - // PurgeDeletes will remove all current delete markers. 
- PurgeDeletes(opts ...PurgeOpt) error - // Status retrieves the status and configuration of a bucket - Status() (KeyValueStatus, error) -} - -// KeyValueStatus is run-time status about a Key-Value bucket -type KeyValueStatus interface { - // Bucket the name of the bucket - Bucket() string - - // Values is how many messages are in the bucket, including historical values - Values() uint64 - - // History returns the configured history kept per key - History() int64 - - // TTL is how long the bucket keeps values for - TTL() time.Duration - - // BackingStore indicates what technology is used for storage of the bucket - BackingStore() string - - // Bytes returns the size in bytes of the bucket - Bytes() uint64 - - // IsCompressed indicates if the data is compressed on disk - IsCompressed() bool -} - -// KeyWatcher is what is returned when doing a watch. -type KeyWatcher interface { - // Context returns watcher context optionally provided by nats.Context option. - Context() context.Context - // Updates returns a channel to read any updates to entries. - Updates() <-chan KeyValueEntry - // Stop will stop this watcher. - Stop() error -} - -// KeyLister is used to retrieve a list of key value store keys -type KeyLister interface { - Keys() <-chan string - Stop() error -} - -type WatchOpt interface { - configureWatcher(opts *watchOpts) error -} - -// For nats.Context() support. -func (ctx ContextOpt) configureWatcher(opts *watchOpts) error { - opts.ctx = ctx - return nil -} - -type watchOpts struct { - ctx context.Context - // Do not send delete markers to the update channel. - ignoreDeletes bool - // Include all history per subject, not just last one. - includeHistory bool - // Include only updates for keys. 
- updatesOnly bool - // retrieve only the meta data of the entry - metaOnly bool -} - -type watchOptFn func(opts *watchOpts) error - -func (opt watchOptFn) configureWatcher(opts *watchOpts) error { - return opt(opts) -} - -// IncludeHistory instructs the key watcher to include historical values as well. -func IncludeHistory() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - if opts.updatesOnly { - return errors.New("nats: include history can not be used with updates only") - } - opts.includeHistory = true - return nil - }) -} - -// UpdatesOnly instructs the key watcher to only include updates on values (without latest values when started). -func UpdatesOnly() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - if opts.includeHistory { - return errors.New("nats: updates only can not be used with include history") - } - opts.updatesOnly = true - return nil - }) -} - -// IgnoreDeletes will have the key watcher not pass any deleted keys. -func IgnoreDeletes() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - opts.ignoreDeletes = true - return nil - }) -} - -// MetaOnly instructs the key watcher to retrieve only the entry meta data, not the entry value -func MetaOnly() WatchOpt { - return watchOptFn(func(opts *watchOpts) error { - opts.metaOnly = true - return nil - }) -} - -type PurgeOpt interface { - configurePurge(opts *purgeOpts) error -} - -type purgeOpts struct { - dmthr time.Duration // Delete markers threshold - ctx context.Context -} - -// DeleteMarkersOlderThan indicates that delete or purge markers older than that -// will be deleted as part of PurgeDeletes() operation, otherwise, only the data -// will be removed but markers that are recent will be kept. -// Note that if no option is specified, the default is 30 minutes. You can set -// this option to a negative value to instruct to always remove the markers, -// regardless of their age. 
-type DeleteMarkersOlderThan time.Duration - -func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { - opts.dmthr = time.Duration(ttl) - return nil -} - -// For nats.Context() support. -func (ctx ContextOpt) configurePurge(opts *purgeOpts) error { - opts.ctx = ctx - return nil -} - -type DeleteOpt interface { - configureDelete(opts *deleteOpts) error -} - -type deleteOpts struct { - // Remove all previous revisions. - purge bool - - // Delete only if the latest revision matches. - revision uint64 -} - -type deleteOptFn func(opts *deleteOpts) error - -func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { - return opt(opts) -} - -// LastRevision deletes if the latest revision matches. -func LastRevision(revision uint64) DeleteOpt { - return deleteOptFn(func(opts *deleteOpts) error { - opts.revision = revision - return nil - }) -} - -// purge removes all previous revisions. -func purge() DeleteOpt { - return deleteOptFn(func(opts *deleteOpts) error { - opts.purge = true - return nil - }) -} - -// KeyValueConfig is for configuring a KeyValue store. -type KeyValueConfig struct { - Bucket string - Description string - MaxValueSize int32 - History uint8 - TTL time.Duration - MaxBytes int64 - Storage StorageType - Replicas int - Placement *Placement - RePublish *RePublish - Mirror *StreamSource - Sources []*StreamSource - - // Enable underlying stream compression. - // NOTE: Compression is supported for nats-server 2.10.0+ - Compression bool -} - -// Used to watch all keys. 
-const ( - KeyValueMaxHistory = 64 - AllKeys = ">" - kvLatestRevision = 0 - kvop = "KV-Operation" - kvdel = "DEL" - kvpurge = "PURGE" -) - -type KeyValueOp uint8 - -const ( - KeyValuePut KeyValueOp = iota - KeyValueDelete - KeyValuePurge -) - -func (op KeyValueOp) String() string { - switch op { - case KeyValuePut: - return "KeyValuePutOp" - case KeyValueDelete: - return "KeyValueDeleteOp" - case KeyValuePurge: - return "KeyValuePurgeOp" - default: - return "Unknown Operation" - } -} - -// KeyValueEntry is a retrieved entry for Get or List or Watch. -type KeyValueEntry interface { - // Bucket is the bucket the data was loaded from. - Bucket() string - // Key is the key that was retrieved. - Key() string - // Value is the retrieved value. - Value() []byte - // Revision is a unique sequence for this value. - Revision() uint64 - // Created is the time the data was put in the bucket. - Created() time.Time - // Delta is distance from the latest value. - Delta() uint64 - // Operation returns Put or Delete or Purge. - Operation() KeyValueOp -} - -// Errors -var ( - ErrKeyValueConfigRequired = errors.New("nats: config required") - ErrInvalidBucketName = errors.New("nats: invalid bucket name") - ErrInvalidKey = errors.New("nats: invalid key") - ErrBucketNotFound = errors.New("nats: bucket not found") - ErrBadBucket = errors.New("nats: bucket not valid key-value store") - ErrKeyNotFound = errors.New("nats: key not found") - ErrKeyDeleted = errors.New("nats: key was deleted") - ErrHistoryToLarge = errors.New("nats: history limited to a max of 64") - ErrNoKeysFound = errors.New("nats: no keys found") -) - -var ( - ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"} -) - -const ( - kvBucketNamePre = "KV_" - kvBucketNameTmpl = "KV_%s" - kvSubjectsTmpl = "$KV.%s.>" - kvSubjectsPreTmpl = "$KV.%s." - kvSubjectsPreDomainTmpl = "%s.$KV.%s." 
- kvNoPending = "0" -) - -// Regex for valid keys and buckets. -var ( - validBucketRe = regexp.MustCompile(`\A[a-zA-Z0-9_-]+\z`) - validKeyRe = regexp.MustCompile(`\A[-/_=\.a-zA-Z0-9]+\z`) -) - -// KeyValue will lookup and bind to an existing KeyValue store. -func (js *js) KeyValue(bucket string) (KeyValue, error) { - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, errors.New("nats: key-value requires at least server version 2.6.2") - } - if !validBucketRe.MatchString(bucket) { - return nil, ErrInvalidBucketName - } - stream := fmt.Sprintf(kvBucketNameTmpl, bucket) - si, err := js.StreamInfo(stream) - if err != nil { - if errors.Is(err, ErrStreamNotFound) { - err = ErrBucketNotFound - } - return nil, err - } - // Do some quick sanity checks that this is a correctly formed stream for KV. - // Max msgs per subject should be > 0. - if si.Config.MaxMsgsPerSubject < 1 { - return nil, ErrBadBucket - } - - return mapStreamToKVS(js, si), nil -} - -// CreateKeyValue will create a KeyValue store with the following configuration. -func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) { - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, errors.New("nats: key-value requires at least server version 2.6.2") - } - if cfg == nil { - return nil, ErrKeyValueConfigRequired - } - if !validBucketRe.MatchString(cfg.Bucket) { - return nil, ErrInvalidBucketName - } - if _, err := js.AccountInfo(); err != nil { - return nil, err - } - - // Default to 1 for history. Max is 64 for now. - history := int64(1) - if cfg.History > 0 { - if cfg.History > KeyValueMaxHistory { - return nil, ErrHistoryToLarge - } - history = int64(cfg.History) - } - - replicas := cfg.Replicas - if replicas == 0 { - replicas = 1 - } - - // We will set explicitly some values so that we can do comparison - // if we get an "already in use" error and need to check if it is same. 
- maxBytes := cfg.MaxBytes - if maxBytes == 0 { - maxBytes = -1 - } - maxMsgSize := cfg.MaxValueSize - if maxMsgSize == 0 { - maxMsgSize = -1 - } - // When stream's MaxAge is not set, server uses 2 minutes as the default - // for the duplicate window. If MaxAge is set, and lower than 2 minutes, - // then the duplicate window will be set to that. If MaxAge is greater, - // we will cap the duplicate window to 2 minutes (to be consistent with - // previous behavior). - duplicateWindow := 2 * time.Minute - if cfg.TTL > 0 && cfg.TTL < duplicateWindow { - duplicateWindow = cfg.TTL - } - var compression StoreCompression - if cfg.Compression { - compression = S2Compression - } - scfg := &StreamConfig{ - Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), - Description: cfg.Description, - MaxMsgsPerSubject: history, - MaxBytes: maxBytes, - MaxAge: cfg.TTL, - MaxMsgSize: maxMsgSize, - Storage: cfg.Storage, - Replicas: replicas, - Placement: cfg.Placement, - AllowRollup: true, - DenyDelete: true, - Duplicates: duplicateWindow, - MaxMsgs: -1, - MaxConsumers: -1, - AllowDirect: true, - RePublish: cfg.RePublish, - Compression: compression, - } - if cfg.Mirror != nil { - // Copy in case we need to make changes so we do not change caller's version. 
- m := cfg.Mirror.copy() - if !strings.HasPrefix(m.Name, kvBucketNamePre) { - m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name) - } - scfg.Mirror = m - scfg.MirrorDirect = true - } else if len(cfg.Sources) > 0 { - for _, ss := range cfg.Sources { - var sourceBucketName string - if strings.HasPrefix(ss.Name, kvBucketNamePre) { - sourceBucketName = ss.Name[len(kvBucketNamePre):] - } else { - sourceBucketName = ss.Name - ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name) - } - - if ss.External == nil || sourceBucketName != cfg.Bucket { - ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}} - } - scfg.Sources = append(scfg.Sources, ss) - } - scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} - } else { - scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} - } - - // If we are at server version 2.7.2 or above use DiscardNew. We can not use DiscardNew for 2.7.1 or below. - if js.nc.serverMinVersion(2, 7, 2) { - scfg.Discard = DiscardNew - } - - si, err := js.AddStream(scfg) - if err != nil { - // If we have a failure to add, it could be because we have - // a config change if the KV was created against a pre 2.7.2 - // and we are now moving to a v2.7.2+. If that is the case - // and the only difference is the discard policy, then update - // the stream. - // The same logic applies for KVs created pre 2.9.x and - // the AllowDirect setting. - if errors.Is(err, ErrStreamNameAlreadyInUse) { - if si, _ = js.StreamInfo(scfg.Name); si != nil { - // To compare, make the server's stream info discard - // policy same than ours. 
- si.Config.Discard = scfg.Discard - // Also need to set allow direct for v2.9.x+ - si.Config.AllowDirect = scfg.AllowDirect - if reflect.DeepEqual(&si.Config, scfg) { - si, err = js.UpdateStream(scfg) - } - } - } - if err != nil { - return nil, err - } - } - return mapStreamToKVS(js, si), nil -} - -// DeleteKeyValue will delete this KeyValue store (JetStream stream). -func (js *js) DeleteKeyValue(bucket string) error { - if !validBucketRe.MatchString(bucket) { - return ErrInvalidBucketName - } - stream := fmt.Sprintf(kvBucketNameTmpl, bucket) - return js.DeleteStream(stream) -} - -type kvs struct { - name string - stream string - pre string - putPre string - js *js - // If true, it means that APIPrefix/Domain was set in the context - // and we need to add something to some of our high level protocols - // (such as Put, etc..) - useJSPfx bool - // To know if we can use the stream direct get API - useDirect bool -} - -// Underlying entry. -type kve struct { - bucket string - key string - value []byte - revision uint64 - delta uint64 - created time.Time - op KeyValueOp -} - -func (e *kve) Bucket() string { return e.bucket } -func (e *kve) Key() string { return e.key } -func (e *kve) Value() []byte { return e.value } -func (e *kve) Revision() uint64 { return e.revision } -func (e *kve) Created() time.Time { return e.created } -func (e *kve) Delta() uint64 { return e.delta } -func (e *kve) Operation() KeyValueOp { return e.op } - -func keyValid(key string) bool { - if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { - return false - } - return validKeyRe.MatchString(key) -} - -// Get returns the latest value for the key. -func (kv *kvs) Get(key string) (KeyValueEntry, error) { - e, err := kv.get(key, kvLatestRevision) - if err != nil { - if errors.Is(err, ErrKeyDeleted) { - return nil, ErrKeyNotFound - } - return nil, err - } - - return e, nil -} - -// GetRevision returns a specific revision value for the key. 
-func (kv *kvs) GetRevision(key string, revision uint64) (KeyValueEntry, error) { - e, err := kv.get(key, revision) - if err != nil { - if errors.Is(err, ErrKeyDeleted) { - return nil, ErrKeyNotFound - } - return nil, err - } - - return e, nil -} - -func (kv *kvs) get(key string, revision uint64) (KeyValueEntry, error) { - if !keyValid(key) { - return nil, ErrInvalidKey - } - - var b strings.Builder - b.WriteString(kv.pre) - b.WriteString(key) - - var m *RawStreamMsg - var err error - var _opts [1]JSOpt - opts := _opts[:0] - if kv.useDirect { - opts = append(opts, DirectGet()) - } - - if revision == kvLatestRevision { - m, err = kv.js.GetLastMsg(kv.stream, b.String(), opts...) - } else { - m, err = kv.js.GetMsg(kv.stream, revision, opts...) - // If a sequence was provided, just make sure that the retrieved - // message subject matches the request. - if err == nil && m.Subject != b.String() { - return nil, ErrKeyNotFound - } - } - if err != nil { - if errors.Is(err, ErrMsgNotFound) { - err = ErrKeyNotFound - } - return nil, err - } - - entry := &kve{ - bucket: kv.name, - key: key, - value: m.Data, - revision: m.Sequence, - created: m.Time, - } - - // Double check here that this is not a DEL Operation marker. - if len(m.Header) > 0 { - switch m.Header.Get(kvop) { - case kvdel: - entry.op = KeyValueDelete - return entry, ErrKeyDeleted - case kvpurge: - entry.op = KeyValuePurge - return entry, ErrKeyDeleted - } - } - - return entry, nil -} - -// Put will place the new value for the key into the store. 
-func (kv *kvs) Put(key string, value []byte) (revision uint64, err error) { - if !keyValid(key) { - return 0, ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.opts.pre) - } - if kv.putPre != _EMPTY_ { - b.WriteString(kv.putPre) - } else { - b.WriteString(kv.pre) - } - b.WriteString(key) - - pa, err := kv.js.Publish(b.String(), value) - if err != nil { - return 0, err - } - return pa.Sequence, err -} - -// PutString will place the string for the key into the store. -func (kv *kvs) PutString(key string, value string) (revision uint64, err error) { - return kv.Put(key, []byte(value)) -} - -// Create will add the key/value pair if it does not exist. -func (kv *kvs) Create(key string, value []byte) (revision uint64, err error) { - v, err := kv.Update(key, value, 0) - if err == nil { - return v, nil - } - - // TODO(dlc) - Since we have tombstones for DEL ops for watchers, this could be from that - // so we need to double check. - if e, err := kv.get(key, kvLatestRevision); errors.Is(err, ErrKeyDeleted) { - return kv.Update(key, value, e.Revision()) - } - - // Check if the expected last subject sequence is not zero which implies - // the key already exists. - if errors.Is(err, ErrKeyExists) { - jserr := ErrKeyExists.(*jsError) - return 0, fmt.Errorf("%w: %s", err, jserr.message) - } - - return 0, err -} - -// Update will update the value if the latest revision matches. -func (kv *kvs) Update(key string, value []byte, revision uint64) (uint64, error) { - if !keyValid(key) { - return 0, ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.opts.pre) - } - b.WriteString(kv.pre) - b.WriteString(key) - - m := Msg{Subject: b.String(), Header: Header{}, Data: value} - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(revision, 10)) - - pa, err := kv.js.PublishMsg(&m) - if err != nil { - return 0, err - } - return pa.Sequence, err -} - -// Delete will place a delete marker and leave all revisions. 
-func (kv *kvs) Delete(key string, opts ...DeleteOpt) error { - if !keyValid(key) { - return ErrInvalidKey - } - - var b strings.Builder - if kv.useJSPfx { - b.WriteString(kv.js.opts.pre) - } - if kv.putPre != _EMPTY_ { - b.WriteString(kv.putPre) - } else { - b.WriteString(kv.pre) - } - b.WriteString(key) - - // DEL op marker. For watch functionality. - m := NewMsg(b.String()) - - var o deleteOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureDelete(&o); err != nil { - return err - } - } - } - - if o.purge { - m.Header.Set(kvop, kvpurge) - m.Header.Set(MsgRollup, MsgRollupSubject) - } else { - m.Header.Set(kvop, kvdel) - } - - if o.revision != 0 { - m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.revision, 10)) - } - - _, err := kv.js.PublishMsg(m) - return err -} - -// Purge will remove the key and all revisions. -func (kv *kvs) Purge(key string, opts ...DeleteOpt) error { - return kv.Delete(key, append(opts, purge())...) -} - -const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute - -// PurgeDeletes will remove all current delete markers. -// This is a maintenance option if there is a larger buildup of delete markers. -// See DeleteMarkersOlderThan() option for more information. -func (kv *kvs) PurgeDeletes(opts ...PurgeOpt) error { - var o purgeOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configurePurge(&o); err != nil { - return err - } - } - } - // Transfer possible context purge option to the watcher. This is the - // only option that matters for the PurgeDeletes() feature. - var wopts []WatchOpt - if o.ctx != nil { - wopts = append(wopts, Context(o.ctx)) - } - watcher, err := kv.WatchAll(wopts...) - if err != nil { - return err - } - defer watcher.Stop() - - var limit time.Time - olderThan := o.dmthr - // Negative value is used to instruct to always remove markers, regardless - // of age. If set to 0 (or not set), use our default value. 
- if olderThan == 0 { - olderThan = kvDefaultPurgeDeletesMarkerThreshold - } - if olderThan > 0 { - limit = time.Now().Add(-olderThan) - } - - var deleteMarkers []KeyValueEntry - for entry := range watcher.Updates() { - if entry == nil { - break - } - if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge { - deleteMarkers = append(deleteMarkers, entry) - } - } - - var ( - pr StreamPurgeRequest - b strings.Builder - ) - // Do actual purges here. - for _, entry := range deleteMarkers { - b.WriteString(kv.pre) - b.WriteString(entry.Key()) - pr.Subject = b.String() - pr.Keep = 0 - if olderThan > 0 && entry.Created().After(limit) { - pr.Keep = 1 - } - if err := kv.js.purgeStream(kv.stream, &pr); err != nil { - return err - } - b.Reset() - } - return nil -} - -// Keys() will return all keys. -func (kv *kvs) Keys(opts ...WatchOpt) ([]string, error) { - opts = append(opts, IgnoreDeletes(), MetaOnly()) - watcher, err := kv.WatchAll(opts...) - if err != nil { - return nil, err - } - defer watcher.Stop() - - var keys []string - for entry := range watcher.Updates() { - if entry == nil { - break - } - keys = append(keys, entry.Key()) - } - if len(keys) == 0 { - return nil, ErrNoKeysFound - } - return keys, nil -} - -type keyLister struct { - watcher KeyWatcher - keys chan string -} - -// ListKeys will return all keys. -func (kv *kvs) ListKeys(opts ...WatchOpt) (KeyLister, error) { - opts = append(opts, IgnoreDeletes(), MetaOnly()) - watcher, err := kv.WatchAll(opts...) - if err != nil { - return nil, err - } - kl := &keyLister{watcher: watcher, keys: make(chan string, 256)} - - go func() { - defer close(kl.keys) - defer watcher.Stop() - for entry := range watcher.Updates() { - if entry == nil { - return - } - kl.keys <- entry.Key() - } - }() - return kl, nil -} - -func (kl *keyLister) Keys() <-chan string { - return kl.keys -} - -func (kl *keyLister) Stop() error { - return kl.watcher.Stop() -} - -// History will return all values for the key. 
-func (kv *kvs) History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) { - opts = append(opts, IncludeHistory()) - watcher, err := kv.Watch(key, opts...) - if err != nil { - return nil, err - } - defer watcher.Stop() - - var entries []KeyValueEntry - for entry := range watcher.Updates() { - if entry == nil { - break - } - entries = append(entries, entry) - } - if len(entries) == 0 { - return nil, ErrKeyNotFound - } - return entries, nil -} - -// Implementation for Watch -type watcher struct { - mu sync.Mutex - updates chan KeyValueEntry - sub *Subscription - initDone bool - initPending uint64 - received uint64 - ctx context.Context -} - -// Context returns the context for the watcher if set. -func (w *watcher) Context() context.Context { - if w == nil { - return nil - } - return w.ctx -} - -// Updates returns the interior channel. -func (w *watcher) Updates() <-chan KeyValueEntry { - if w == nil { - return nil - } - return w.updates -} - -// Stop will unsubscribe from the watcher. -func (w *watcher) Stop() error { - if w == nil { - return nil - } - return w.sub.Unsubscribe() -} - -// WatchAll watches all keys. -func (kv *kvs) WatchAll(opts ...WatchOpt) (KeyWatcher, error) { - return kv.Watch(AllKeys, opts...) -} - -// Watch will fire the callback when a key that matches the keys pattern is updated. -// keys needs to be a valid NATS subject. -func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) { - var o watchOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureWatcher(&o); err != nil { - return nil, err - } - } - } - - // Could be a pattern so don't check for validity as we normally do. - var b strings.Builder - b.WriteString(kv.pre) - b.WriteString(keys) - keys = b.String() - - // We will block below on placing items on the chan. That is by design. 
- w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx} - - update := func(m *Msg) { - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - return - } - if len(m.Subject) <= len(kv.pre) { - return - } - subj := m.Subject[len(kv.pre):] - - var op KeyValueOp - if len(m.Header) > 0 { - switch m.Header.Get(kvop) { - case kvdel: - op = KeyValueDelete - case kvpurge: - op = KeyValuePurge - } - } - delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos]) - w.mu.Lock() - defer w.mu.Unlock() - if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) { - entry := &kve{ - bucket: kv.name, - key: subj, - value: m.Data, - revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), - created: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), - delta: delta, - op: op, - } - w.updates <- entry - } - // Check if done and initial values. - // Skip if UpdatesOnly() is set, since there will never be updates initially. - if !w.initDone { - w.received++ - // We set this on the first trip through.. - if w.initPending == 0 { - w.initPending = delta - } - if w.received > w.initPending || delta == 0 { - w.initDone = true - w.updates <- nil - } - } - } - - // Used ordered consumer to deliver results. - subOpts := []SubOpt{BindStream(kv.stream), OrderedConsumer()} - if !o.includeHistory { - subOpts = append(subOpts, DeliverLastPerSubject()) - } - if o.updatesOnly { - subOpts = append(subOpts, DeliverNew()) - } - if o.metaOnly { - subOpts = append(subOpts, HeadersOnly()) - } - if o.ctx != nil { - subOpts = append(subOpts, Context(o.ctx)) - } - // Create the sub and rest of initialization under the lock. - // We want to prevent the race between this code and the - // update() callback. - w.mu.Lock() - defer w.mu.Unlock() - sub, err := kv.js.Subscribe(keys, update, subOpts...) 
- if err != nil { - return nil, err - } - sub.mu.Lock() - // If there were no pending messages at the time of the creation - // of the consumer, send the marker. - // Skip if UpdatesOnly() is set, since there will never be updates initially. - if !o.updatesOnly { - if sub.jsi != nil && sub.jsi.pending == 0 { - w.initDone = true - w.updates <- nil - } - } else { - // if UpdatesOnly was used, mark initialization as complete - w.initDone = true - } - // Set us up to close when the waitForMessages func returns. - sub.pDone = func(_ string) { - close(w.updates) - } - sub.mu.Unlock() - - w.sub = sub - return w, nil -} - -// Bucket returns the current bucket name (JetStream stream). -func (kv *kvs) Bucket() string { - return kv.name -} - -// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus -type KeyValueBucketStatus struct { - nfo *StreamInfo - bucket string -} - -// Bucket the name of the bucket -func (s *KeyValueBucketStatus) Bucket() string { return s.bucket } - -// Values is how many messages are in the bucket, including historical values -func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs } - -// History returns the configured history kept per key -func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject } - -// TTL is how long the bucket keeps values for -func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } - -// BackingStore indicates what technology is used for storage of the bucket -func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" } - -// StreamInfo is the stream info retrieved to create the status -func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo } - -// Bytes is the size of the stream -func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes } - -// IsCompressed indicates if the data is compressed on disk -func (s *KeyValueBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression 
!= NoCompression } - -// Status retrieves the status and configuration of a bucket -func (kv *kvs) Status() (KeyValueStatus, error) { - nfo, err := kv.js.StreamInfo(kv.stream) - if err != nil { - return nil, err - } - - return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil -} - -// KeyValueStoreNames is used to retrieve a list of key value store names -func (js *js) KeyValueStoreNames() <-chan string { - ch := make(chan string) - l := &streamNamesLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") - go func() { - defer close(ch) - for l.Next() { - for _, name := range l.Page() { - if !strings.HasPrefix(name, kvBucketNamePre) { - continue - } - ch <- strings.TrimPrefix(name, kvBucketNamePre) - } - } - }() - - return ch -} - -// KeyValueStores is used to retrieve a list of key value store statuses -func (js *js) KeyValueStores() <-chan KeyValueStatus { - ch := make(chan KeyValueStatus) - l := &streamLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") - go func() { - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { - continue - } - ch <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)} - } - } - }() - return ch -} - -func mapStreamToKVS(js *js, info *StreamInfo) *kvs { - bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre) - - kv := &kvs{ - name: bucket, - stream: info.Config.Name, - pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket), - js: js, - // Determine if we need to use the JS prefix in front of Put and Delete operations - useJSPfx: js.opts.pre != defaultAPIPrefix, - useDirect: info.Config.AllowDirect, - } - - // If we are mirroring, we will have mirror direct on, so just use the mirror name - // and override use - if m := info.Config.Mirror; m != nil { - bucket := strings.TrimPrefix(m.Name, kvBucketNamePre) - if m.External != nil && m.External.APIPrefix != _EMPTY_ { - 
kv.useJSPfx = false - kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) - kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket) - } else { - kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) - } - } - - return kv -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/legacy_jetstream.md b/backend/services/controller/vendor/github.com/nats-io/nats.go/legacy_jetstream.md deleted file mode 100644 index 43e1c73..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/legacy_jetstream.md +++ /dev/null @@ -1,83 +0,0 @@ -# Legacy JetStream API - -This is a documentation for the legacy JetStream API. A README for the current -API can be found [here](jetstream/README.md) - -## JetStream Basic Usage - -```go -import "github.com/nats-io/nats.go" - -// Connect to NATS -nc, _ := nats.Connect(nats.DefaultURL) - -// Create JetStream Context -js, _ := nc.JetStream(nats.PublishAsyncMaxPending(256)) - -// Simple Stream Publisher -js.Publish("ORDERS.scratch", []byte("hello")) - -// Simple Async Stream Publisher -for i := 0; i < 500; i++ { - js.PublishAsync("ORDERS.scratch", []byte("hello")) -} -select { -case <-js.PublishAsyncComplete(): -case <-time.After(5 * time.Second): - fmt.Println("Did not resolve in time") -} - -// Simple Async Ephemeral Consumer -js.Subscribe("ORDERS.*", func(m *nats.Msg) { - fmt.Printf("Received a JetStream message: %s\n", string(m.Data)) -}) - -// Simple Sync Durable Consumer (optional SubOpts at the end) -sub, err := js.SubscribeSync("ORDERS.*", nats.Durable("MONITOR"), nats.MaxDeliver(3)) -m, err := sub.NextMsg(timeout) - -// Simple Pull Consumer -sub, err := js.PullSubscribe("ORDERS.*", "MONITOR") -msgs, err := sub.Fetch(10) - -// Unsubscribe -sub.Unsubscribe() - -// Drain -sub.Drain() -``` - -## JetStream Basic Management - -```go -import "github.com/nats-io/nats.go" - -// Connect to NATS -nc, _ := nats.Connect(nats.DefaultURL) - -// Create JetStream Context -js, _ := nc.JetStream() - 
-// Create a Stream -js.AddStream(&nats.StreamConfig{ - Name: "ORDERS", - Subjects: []string{"ORDERS.*"}, -}) - -// Update a Stream -js.UpdateStream(&nats.StreamConfig{ - Name: "ORDERS", - MaxBytes: 8, -}) - -// Create a Consumer -js.AddConsumer("ORDERS", &nats.ConsumerConfig{ - Durable: "MONITOR", -}) - -// Delete Consumer -js.DeleteConsumer("ORDERS", "MONITOR") - -// Delete Stream -js.DeleteStream("ORDERS") -``` diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/nats.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/nats.go deleted file mode 100644 index 5b145c6..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/nats.go +++ /dev/null @@ -1,5694 +0,0 @@ -// Copyright 2012-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// A Go client for the NATS messaging system (https://nats.io). 
-package nats - -import ( - "bufio" - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math/rand" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/nats-io/nkeys" - "github.com/nats-io/nuid" - - "github.com/nats-io/nats.go/util" -) - -// Default Constants -const ( - Version = "1.33.1" - DefaultURL = "nats://127.0.0.1:4222" - DefaultPort = 4222 - DefaultMaxReconnect = 60 - DefaultReconnectWait = 2 * time.Second - DefaultReconnectJitter = 100 * time.Millisecond - DefaultReconnectJitterTLS = time.Second - DefaultTimeout = 2 * time.Second - DefaultPingInterval = 2 * time.Minute - DefaultMaxPingOut = 2 - DefaultMaxChanLen = 64 * 1024 // 64k - DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB - RequestChanLen = 8 - DefaultDrainTimeout = 30 * time.Second - DefaultFlusherTimeout = time.Minute - LangString = "go" -) - -const ( - // STALE_CONNECTION is for detection and proper handling of stale connections. - STALE_CONNECTION = "stale connection" - - // PERMISSIONS_ERR is for when nats server subject authorization has failed. - PERMISSIONS_ERR = "permissions violation" - - // AUTHORIZATION_ERR is for when nats server user authorization has failed. - AUTHORIZATION_ERR = "authorization violation" - - // AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired. - AUTHENTICATION_EXPIRED_ERR = "user authentication expired" - - // AUTHENTICATION_REVOKED_ERR is for when user authorization has been revoked. - AUTHENTICATION_REVOKED_ERR = "user authentication revoked" - - // ACCOUNT_AUTHENTICATION_EXPIRED_ERR is for when nats server account authorization has expired. 
- ACCOUNT_AUTHENTICATION_EXPIRED_ERR = "account authentication expired" - - // MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit - MAX_CONNECTIONS_ERR = "maximum connections exceeded" -) - -// Errors -var ( - ErrConnectionClosed = errors.New("nats: connection closed") - ErrConnectionDraining = errors.New("nats: connection draining") - ErrDrainTimeout = errors.New("nats: draining connection timed out") - ErrConnectionReconnecting = errors.New("nats: connection reconnecting") - ErrSecureConnRequired = errors.New("nats: secure connection required") - ErrSecureConnWanted = errors.New("nats: secure connection not available") - ErrBadSubscription = errors.New("nats: invalid subscription") - ErrTypeSubscription = errors.New("nats: invalid subscription type") - ErrBadSubject = errors.New("nats: invalid subject") - ErrBadQueueName = errors.New("nats: invalid queue name") - ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") - ErrTimeout = errors.New("nats: timeout") - ErrBadTimeout = errors.New("nats: timeout invalid") - ErrAuthorization = errors.New("nats: authorization violation") - ErrAuthExpired = errors.New("nats: authentication expired") - ErrAuthRevoked = errors.New("nats: authentication revoked") - ErrAccountAuthExpired = errors.New("nats: account authentication expired") - ErrNoServers = errors.New("nats: no servers available for connection") - ErrJsonParse = errors.New("nats: connect message, json parse error") - ErrChanArg = errors.New("nats: argument needs to be a channel type") - ErrMaxPayload = errors.New("nats: maximum payload exceeded") - ErrMaxMessages = errors.New("nats: maximum messages delivered") - ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") - ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") - ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") - ErrReconnectBufExceeded = errors.New("nats: 
outbound buffer limit exceeded") - ErrInvalidConnection = errors.New("nats: invalid connection") - ErrInvalidMsg = errors.New("nats: invalid message or message nil") - ErrInvalidArg = errors.New("nats: invalid argument") - ErrInvalidContext = errors.New("nats: invalid context") - ErrNoDeadlineContext = errors.New("nats: context requires a deadline") - ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server") - ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server") - ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler") - ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler") - ErrNoUserCB = errors.New("nats: user callback not defined") - ErrNkeyAndUser = errors.New("nats: user callback and nkey defined") - ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server") - ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) - ErrTokenAlreadySet = errors.New("nats: token and token handler both set") - ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection") - ErrMsgNoReply = errors.New("nats: message does not have a reply") - ErrClientIPNotSupported = errors.New("nats: client IP not supported by this server") - ErrDisconnected = errors.New("nats: server is disconnected") - ErrHeadersNotSupported = errors.New("nats: headers not supported by this server") - ErrBadHeaderMsg = errors.New("nats: message could not decode headers") - ErrNoResponders = errors.New("nats: no responders available for request") - ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded") - ErrConnectionNotTLS = errors.New("nats: connection is not tls") -) - -// GetDefaultOptions returns default configuration options for the client. 
-func GetDefaultOptions() Options { - return Options{ - AllowReconnect: true, - MaxReconnect: DefaultMaxReconnect, - ReconnectWait: DefaultReconnectWait, - ReconnectJitter: DefaultReconnectJitter, - ReconnectJitterTLS: DefaultReconnectJitterTLS, - Timeout: DefaultTimeout, - PingInterval: DefaultPingInterval, - MaxPingsOut: DefaultMaxPingOut, - SubChanLen: DefaultMaxChanLen, - ReconnectBufSize: DefaultReconnectBufSize, - DrainTimeout: DefaultDrainTimeout, - FlusherTimeout: DefaultFlusherTimeout, - } -} - -// DEPRECATED: Use GetDefaultOptions() instead. -// DefaultOptions is not safe for use by multiple clients. -// For details see #308. -var DefaultOptions = GetDefaultOptions() - -// Status represents the state of the connection. -type Status int - -const ( - DISCONNECTED = Status(iota) - CONNECTED - CLOSED - RECONNECTING - CONNECTING - DRAINING_SUBS - DRAINING_PUBS -) - -func (s Status) String() string { - switch s { - case DISCONNECTED: - return "DISCONNECTED" - case CONNECTED: - return "CONNECTED" - case CLOSED: - return "CLOSED" - case RECONNECTING: - return "RECONNECTING" - case CONNECTING: - return "CONNECTING" - case DRAINING_SUBS: - return "DRAINING_SUBS" - case DRAINING_PUBS: - return "DRAINING_PUBS" - } - return "unknown status" -} - -// ConnHandler is used for asynchronous events such as -// disconnected and closed connections. -type ConnHandler func(*Conn) - -// ConnErrHandler is used to process asynchronous events like -// disconnected connection with the error (if any). -type ConnErrHandler func(*Conn, error) - -// ErrHandler is used to process asynchronous errors encountered -// while processing inbound messages. -type ErrHandler func(*Conn, *Subscription, error) - -// UserJWTHandler is used to fetch and return the account signed -// JWT for this user. -type UserJWTHandler func() (string, error) - -// TLSCertHandler is used to fetch and return tls certificate. 
-type TLSCertHandler func() (tls.Certificate, error) - -// RootCAsHandler is used to fetch and return a set of root certificate -// authorities that clients use when verifying server certificates. -type RootCAsHandler func() (*x509.CertPool, error) - -// SignatureHandler is used to sign a nonce from the server while -// authenticating with nkeys. The user should sign the nonce and -// return the raw signature. The client will base64 encode this to -// send to the server. -type SignatureHandler func([]byte) ([]byte, error) - -// AuthTokenHandler is used to generate a new token. -type AuthTokenHandler func() string - -// ReconnectDelayHandler is used to get from the user the desired -// delay the library should pause before attempting to reconnect -// again. Note that this is invoked after the library tried the -// whole list of URLs and failed to reconnect. -type ReconnectDelayHandler func(attempts int) time.Duration - -// asyncCB is used to preserve order for async callbacks. -type asyncCB struct { - f func() - next *asyncCB -} - -type asyncCallbacksHandler struct { - mu sync.Mutex - cond *sync.Cond - head *asyncCB - tail *asyncCB -} - -// Option is a function on the options for a connection. -type Option func(*Options) error - -// CustomDialer can be used to specify any dialer, not necessarily a -// *net.Dialer. A CustomDialer may also implement `SkipTLSHandshake() bool` -// in order to skip the TLS handshake in case not required. -type CustomDialer interface { - Dial(network, address string) (net.Conn, error) -} - -type InProcessConnProvider interface { - InProcessConn() (net.Conn, error) -} - -// Options can be used to create a customized connection. -type Options struct { - - // Url represents a single NATS server url to which the client - // will be connecting. If the Servers option is also set, it - // then becomes the first server in the Servers array. - Url string - - // InProcessServer represents a NATS server running within the - // same process. 
If this is set then we will attempt to connect - // to the server directly rather than using external TCP conns. - InProcessServer InProcessConnProvider - - // Servers is a configured set of servers which this client - // will use when attempting to connect. - Servers []string - - // NoRandomize configures whether we will randomize the - // server pool. - NoRandomize bool - - // NoEcho configures whether the server will echo back messages - // that are sent on this connection if we also have matching subscriptions. - // Note this is supported on servers >= version 1.2. Proto 1 or greater. - NoEcho bool - - // Name is an optional name label which will be sent to the server - // on CONNECT to identify the client. - Name string - - // Verbose signals the server to send an OK ack for commands - // successfully processed by the server. - Verbose bool - - // Pedantic signals the server whether it should be doing further - // validation of subjects. - Pedantic bool - - // Secure enables TLS secure connections that skip server - // verification by default. NOT RECOMMENDED. - Secure bool - - // TLSConfig is a custom TLS configuration to use for secure - // transports. - TLSConfig *tls.Config - - // TLSCertCB is used to fetch and return custom tls certificate. - TLSCertCB TLSCertHandler - - // TLSHandshakeFirst is used to instruct the library perform - // the TLS handshake right after the connect and before receiving - // the INFO protocol from the server. If this option is enabled - // but the server is not configured to perform the TLS handshake - // first, the connection will fail. - TLSHandshakeFirst bool - - // RootCAsCB is used to fetch and return a set of root certificate - // authorities that clients use when verifying server certificates. - RootCAsCB RootCAsHandler - - // AllowReconnect enables reconnection logic to be used when we - // encounter a disconnect from the current server. 
- AllowReconnect bool - - // MaxReconnect sets the number of reconnect attempts that will be - // tried before giving up. If negative, then it will never give up - // trying to reconnect. - // Defaults to 60. - MaxReconnect int - - // ReconnectWait sets the time to backoff after attempting a reconnect - // to a server that we were already connected to previously. - // Defaults to 2s. - ReconnectWait time.Duration - - // CustomReconnectDelayCB is invoked after the library tried every - // URL in the server list and failed to reconnect. It passes to the - // user the current number of attempts. This function returns the - // amount of time the library will sleep before attempting to reconnect - // again. It is strongly recommended that this value contains some - // jitter to prevent all connections to attempt reconnecting at the same time. - CustomReconnectDelayCB ReconnectDelayHandler - - // ReconnectJitter sets the upper bound for a random delay added to - // ReconnectWait during a reconnect when no TLS is used. - // Defaults to 100ms. - ReconnectJitter time.Duration - - // ReconnectJitterTLS sets the upper bound for a random delay added to - // ReconnectWait during a reconnect when TLS is used. - // Defaults to 1s. - ReconnectJitterTLS time.Duration - - // Timeout sets the timeout for a Dial operation on a connection. - // Defaults to 2s. - Timeout time.Duration - - // DrainTimeout sets the timeout for a Drain Operation to complete. - // Defaults to 30s. - DrainTimeout time.Duration - - // FlusherTimeout is the maximum time to wait for write operations - // to the underlying connection to complete (including the flusher loop). - // Defaults to 1m. - FlusherTimeout time.Duration - - // PingInterval is the period at which the client will be sending ping - // commands to the server, disabled if 0 or negative. - // Defaults to 2m. 
- PingInterval time.Duration - - // MaxPingsOut is the maximum number of pending ping commands that can - // be awaiting a response before raising an ErrStaleConnection error. - // Defaults to 2. - MaxPingsOut int - - // ClosedCB sets the closed handler that is called when a client will - // no longer be connected. - ClosedCB ConnHandler - - // DisconnectedCB sets the disconnected handler that is called - // whenever the connection is disconnected. - // Will not be called if DisconnectedErrCB is set - // DEPRECATED. Use DisconnectedErrCB which passes error that caused - // the disconnect event. - DisconnectedCB ConnHandler - - // DisconnectedErrCB sets the disconnected error handler that is called - // whenever the connection is disconnected. - // Disconnected error could be nil, for instance when user explicitly closes the connection. - // DisconnectedCB will not be called if DisconnectedErrCB is set - DisconnectedErrCB ConnErrHandler - - // ConnectedCB sets the connected handler called when the initial connection - // is established. It is not invoked on successful reconnects - for reconnections, - // use ReconnectedCB. ConnectedCB can be used in conjunction with RetryOnFailedConnect - // to detect whether the initial connect was successful. - ConnectedCB ConnHandler - - // ReconnectedCB sets the reconnected handler called whenever - // the connection is successfully reconnected. - ReconnectedCB ConnHandler - - // DiscoveredServersCB sets the callback that is invoked whenever a new - // server has joined the cluster. - DiscoveredServersCB ConnHandler - - // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) - AsyncErrorCB ErrHandler - - // ReconnectBufSize is the size of the backing bufio during reconnect. - // Once this has been exhausted publish operations will return an error. - // Defaults to 8388608 bytes (8MB). 
- ReconnectBufSize int - - // SubChanLen is the size of the buffered channel used between the socket - // Go routine and the message delivery for SyncSubscriptions. - // NOTE: This does not affect AsyncSubscriptions which are - // dictated by PendingLimits() - // Defaults to 65536. - SubChanLen int - - // UserJWT sets the callback handler that will fetch a user's JWT. - UserJWT UserJWTHandler - - // Nkey sets the public nkey that will be used to authenticate - // when connecting to the server. UserJWT and Nkey are mutually exclusive - // and if defined, UserJWT will take precedence. - Nkey string - - // SignatureCB designates the function used to sign the nonce - // presented from the server. - SignatureCB SignatureHandler - - // User sets the username to be used when connecting to the server. - User string - - // Password sets the password to be used when connecting to a server. - Password string - - // Token sets the token to be used when connecting to a server. - Token string - - // TokenHandler designates the function used to generate the token to be used when connecting to a server. - TokenHandler AuthTokenHandler - - // Dialer allows a custom net.Dialer when forming connections. - // DEPRECATED: should use CustomDialer instead. - Dialer *net.Dialer - - // CustomDialer allows to specify a custom dialer (not necessarily - // a *net.Dialer). - CustomDialer CustomDialer - - // UseOldRequestStyle forces the old method of Requests that utilize - // a new Inbox and a new Subscription for each request. - UseOldRequestStyle bool - - // NoCallbacksAfterClientClose allows preventing the invocation of - // callbacks after Close() is called. Client won't receive notifications - // when Close is invoked by user code. Default is to invoke the callbacks. 
- NoCallbacksAfterClientClose bool - - // LameDuckModeHandler sets the callback to invoke when the server notifies - // the connection that it entered lame duck mode, that is, going to - // gradually disconnect all its connections before shutting down. This is - // often used in deployments when upgrading NATS Servers. - LameDuckModeHandler ConnHandler - - // RetryOnFailedConnect sets the connection in reconnecting state right - // away if it can't connect to a server in the initial set. The - // MaxReconnect and ReconnectWait options are used for this process, - // similarly to when an established connection is disconnected. - // If a ReconnectHandler is set, it will be invoked on the first - // successful reconnect attempt (if the initial connect fails), - // and if a ClosedHandler is set, it will be invoked if - // it fails to connect (after exhausting the MaxReconnect attempts). - RetryOnFailedConnect bool - - // For websocket connections, indicates to the server that the connection - // supports compression. If the server does too, then data will be compressed. - Compression bool - - // For websocket connections, adds a path to connections url. - // This is useful when connecting to NATS behind a proxy. - ProxyPath string - - // InboxPrefix allows the default _INBOX prefix to be customized - InboxPrefix string - - // IgnoreAuthErrorAbort - if set to true, client opts out of the default connect behavior of aborting - // subsequent reconnect attempts if server returns the same auth error twice (regardless of reconnect policy). - IgnoreAuthErrorAbort bool - - // SkipHostLookup skips the DNS lookup for the server hostname. - SkipHostLookup bool -} - -const ( - // Scratch storage for assembling protocol headers - scratchSize = 512 - - // The size of the bufio reader/writer on top of the socket. 
- defaultBufSize = 32768 - - // The buffered size of the flush "kick" channel - flushChanSize = 1 - - // Default server pool size - srvPoolSize = 4 - - // NUID size - nuidSize = 22 - - // Default ports used if none is specified in given URL(s) - defaultWSPortString = "80" - defaultWSSPortString = "443" - defaultPortString = "4222" -) - -// A Conn represents a bare connection to a nats-server. -// It can send and receive []byte payloads. -// The connection is safe to use in multiple Go routines concurrently. -type Conn struct { - // Keep all members for which we use atomic at the beginning of the - // struct and make sure they are all 64bits (or use padding if necessary). - // atomic.* functions crash on 32bit machines if operand is not aligned - // at 64bit. See https://github.com/golang/go/issues/599 - Statistics - mu sync.RWMutex - // Opts holds the configuration of the Conn. - // Modifying the configuration of a running Conn is a race. - Opts Options - wg sync.WaitGroup - srvPool []*srv - current *srv - urls map[string]struct{} // Keep track of all known URLs (used by processInfo) - conn net.Conn - bw *natsWriter - br *natsReader - fch chan struct{} - info serverInfo - ssid int64 - subsMu sync.RWMutex - subs map[int64]*Subscription - ach *asyncCallbacksHandler - pongs []chan struct{} - scratch [scratchSize]byte - status Status - statListeners map[Status][]chan Status - initc bool // true if the connection is performing the initial connect - err error - ps *parseState - ptmr *time.Timer - pout int - ar bool // abort reconnect - rqch chan struct{} - ws bool // true if a websocket connection - - // New style response handler - respSub string // The wildcard subject - respSubPrefix string // the wildcard prefix including trailing . - respSubLen int // the length of the wildcard prefix excluding trailing . 
- respScanf string // The scanf template to extract mux token - respMux *Subscription // A single response subscription - respMap map[string]chan *Msg // Request map for the response msg channels - respRand *rand.Rand // Used for generating suffix - - // Msg filters for testing. - // Protected by subsMu - filters map[string]msgFilter -} - -type natsReader struct { - r io.Reader - buf []byte - off int - n int -} - -type natsWriter struct { - w io.Writer - bufs []byte - limit int - pending *bytes.Buffer - plimit int -} - -// Subscription represents interest in a given subject. -type Subscription struct { - mu sync.Mutex - sid int64 - - // Subject that represents this subscription. This can be different - // than the received subject inside a Msg if this is a wildcard. - Subject string - - // Optional queue group name. If present, all subscriptions with the - // same name will form a distributed queue, and each message will - // only be processed by one member of the group. - Queue string - - // For holding information about a JetStream consumer. - jsi *jsSub - - delivered uint64 - max uint64 - conn *Conn - mcb MsgHandler - mch chan *Msg - closed bool - sc bool - connClosed bool - - // Type of Subscription - typ SubscriptionType - - // Async linked list - pHead *Msg - pTail *Msg - pCond *sync.Cond - pDone func(subject string) - - // Pending stats, async subscriptions, high-speed etc. - pMsgs int - pBytes int - pMsgsMax int - pBytesMax int - pMsgsLimit int - pBytesLimit int - dropped int -} - -// Msg represents a message delivered by NATS. This structure is used -// by Subscribers and PublishMsg(). -// -// # Types of Acknowledgements -// -// In case using JetStream, there are multiple ways to ack a Msg: -// -// // Acknowledgement that a message has been processed. -// msg.Ack() -// -// // Negatively acknowledges a message. -// msg.Nak() -// -// // Terminate a message so that it is not redelivered further. 
-// msg.Term() -// -// // Signal the server that the message is being worked on and reset redelivery timer. -// msg.InProgress() -type Msg struct { - Subject string - Reply string - Header Header - Data []byte - Sub *Subscription - // Internal - next *Msg - wsz int - barrier *barrierInfo - ackd uint32 -} - -// Compares two msgs, ignores sub but checks all other public fields. -func (m *Msg) Equal(msg *Msg) bool { - if m == msg { - return true - } - if m == nil || msg == nil { - return false - } - if m.Subject != msg.Subject || m.Reply != msg.Reply { - return false - } - if !bytes.Equal(m.Data, msg.Data) { - return false - } - if len(m.Header) != len(msg.Header) { - return false - } - for k, v := range m.Header { - val, ok := msg.Header[k] - if !ok || len(v) != len(val) { - return false - } - for i, hdr := range v { - if hdr != val[i] { - return false - } - } - } - return true -} - -// Size returns a message size in bytes. -func (m *Msg) Size() int { - if m.wsz != 0 { - return m.wsz - } - hdr, _ := m.headerBytes() - return len(m.Subject) + len(m.Reply) + len(hdr) + len(m.Data) -} - -func (m *Msg) headerBytes() ([]byte, error) { - var hdr []byte - if len(m.Header) == 0 { - return hdr, nil - } - - var b bytes.Buffer - _, err := b.WriteString(hdrLine) - if err != nil { - return nil, ErrBadHeaderMsg - } - - err = http.Header(m.Header).Write(&b) - if err != nil { - return nil, ErrBadHeaderMsg - } - - _, err = b.WriteString(crlf) - if err != nil { - return nil, ErrBadHeaderMsg - } - - return b.Bytes(), nil -} - -type barrierInfo struct { - refs int64 - f func() -} - -// Tracks various stats received and sent on this connection, -// including counts for messages and bytes. -type Statistics struct { - InMsgs uint64 - OutMsgs uint64 - InBytes uint64 - OutBytes uint64 - Reconnects uint64 -} - -// Tracks individual backend servers. 
-type srv struct { - url *url.URL - didConnect bool - reconnects int - lastErr error - isImplicit bool - tlsName string -} - -// The INFO block received from the server. -type serverInfo struct { - ID string `json:"server_id"` - Name string `json:"server_name"` - Proto int `json:"proto"` - Version string `json:"version"` - Host string `json:"host"` - Port int `json:"port"` - Headers bool `json:"headers"` - AuthRequired bool `json:"auth_required,omitempty"` - TLSRequired bool `json:"tls_required,omitempty"` - TLSAvailable bool `json:"tls_available,omitempty"` - MaxPayload int64 `json:"max_payload"` - CID uint64 `json:"client_id,omitempty"` - ClientIP string `json:"client_ip,omitempty"` - Nonce string `json:"nonce,omitempty"` - Cluster string `json:"cluster,omitempty"` - ConnectURLs []string `json:"connect_urls,omitempty"` - LameDuckMode bool `json:"ldm,omitempty"` -} - -const ( - // clientProtoZero is the original client protocol from 2009. - // http://nats.io/documentation/internals/nats-protocol/ - /* clientProtoZero */ _ = iota - // clientProtoInfo signals a client can receive more then the original INFO block. - // This can be used to update clients on other cluster members, etc. - clientProtoInfo -) - -type connectInfo struct { - Verbose bool `json:"verbose"` - Pedantic bool `json:"pedantic"` - UserJWT string `json:"jwt,omitempty"` - Nkey string `json:"nkey,omitempty"` - Signature string `json:"sig,omitempty"` - User string `json:"user,omitempty"` - Pass string `json:"pass,omitempty"` - Token string `json:"auth_token,omitempty"` - TLS bool `json:"tls_required"` - Name string `json:"name"` - Lang string `json:"lang"` - Version string `json:"version"` - Protocol int `json:"protocol"` - Echo bool `json:"echo"` - Headers bool `json:"headers"` - NoResponders bool `json:"no_responders"` -} - -// MsgHandler is a callback function that processes messages delivered to -// asynchronous subscribers. 
-type MsgHandler func(msg *Msg) - -// Connect will attempt to connect to the NATS system. -// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 -// Comma separated arrays are also supported, e.g. urlA, urlB. -// Options start with the defaults but can be overridden. -// To connect to a NATS Server's websocket port, use the `ws` or `wss` scheme, such as -// `ws://localhost:8080`. Note that websocket schemes cannot be mixed with others (nats/tls). -func Connect(url string, options ...Option) (*Conn, error) { - opts := GetDefaultOptions() - opts.Servers = processUrlString(url) - for _, opt := range options { - if opt != nil { - if err := opt(&opts); err != nil { - return nil, err - } - } - } - return opts.Connect() -} - -// Options that can be passed to Connect. - -// Name is an Option to set the client name. -func Name(name string) Option { - return func(o *Options) error { - o.Name = name - return nil - } -} - -// InProcessServer is an Option that will try to establish a direction to a NATS server -// running within the process instead of dialing via TCP. -func InProcessServer(server InProcessConnProvider) Option { - return func(o *Options) error { - o.InProcessServer = server - return nil - } -} - -// Secure is an Option to enable TLS secure connections that skip server verification by default. -// Pass a TLS Configuration for proper TLS. -// A TLS Configuration using InsecureSkipVerify should NOT be used in a production setting. -func Secure(tls ...*tls.Config) Option { - return func(o *Options) error { - o.Secure = true - // Use of variadic just simplifies testing scenarios. We only take the first one. - if len(tls) > 1 { - return ErrMultipleTLSConfigs - } - if len(tls) == 1 { - o.TLSConfig = tls[0] - } - return nil - } -} - -// RootCAs is a helper option to provide the RootCAs pool from a list of filenames. -// If Secure is not already set this will set it as well. 
-func RootCAs(file ...string) Option { - return func(o *Options) error { - rootCAsCB := func() (*x509.CertPool, error) { - pool := x509.NewCertPool() - for _, f := range file { - rootPEM, err := os.ReadFile(f) - if err != nil || rootPEM == nil { - return nil, fmt.Errorf("nats: error loading or parsing rootCA file: %w", err) - } - ok := pool.AppendCertsFromPEM(rootPEM) - if !ok { - return nil, fmt.Errorf("nats: failed to parse root certificate from %q", f) - } - } - return pool, nil - } - if o.TLSConfig == nil { - o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - if _, err := rootCAsCB(); err != nil { - return err - } - o.RootCAsCB = rootCAsCB - o.Secure = true - return nil - } -} - -// ClientCert is a helper option to provide the client certificate from a file. -// If Secure is not already set this will set it as well. -func ClientCert(certFile, keyFile string) Option { - return func(o *Options) error { - tlsCertCB := func() (tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return tls.Certificate{}, fmt.Errorf("nats: error loading client certificate: %w", err) - } - cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return tls.Certificate{}, fmt.Errorf("nats: error parsing client certificate: %w", err) - } - return cert, nil - } - if o.TLSConfig == nil { - o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - if _, err := tlsCertCB(); err != nil { - return err - } - o.TLSCertCB = tlsCertCB - o.Secure = true - return nil - } -} - -// NoReconnect is an Option to turn off reconnect behavior. -func NoReconnect() Option { - return func(o *Options) error { - o.AllowReconnect = false - return nil - } -} - -// DontRandomize is an Option to turn off randomizing the server pool. -func DontRandomize() Option { - return func(o *Options) error { - o.NoRandomize = true - return nil - } -} - -// NoEcho is an Option to turn off messages echoing back from a server. 
-// Note this is supported on servers >= version 1.2. Proto 1 or greater. -func NoEcho() Option { - return func(o *Options) error { - o.NoEcho = true - return nil - } -} - -// ReconnectWait is an Option to set the wait time between reconnect attempts. -// Defaults to 2s. -func ReconnectWait(t time.Duration) Option { - return func(o *Options) error { - o.ReconnectWait = t - return nil - } -} - -// MaxReconnects is an Option to set the maximum number of reconnect attempts. -// If negative, it will never stop trying to reconnect. -// Defaults to 60. -func MaxReconnects(max int) Option { - return func(o *Options) error { - o.MaxReconnect = max - return nil - } -} - -// ReconnectJitter is an Option to set the upper bound of a random delay added ReconnectWait. -// Defaults to 100ms and 1s, respectively. -func ReconnectJitter(jitter, jitterForTLS time.Duration) Option { - return func(o *Options) error { - o.ReconnectJitter = jitter - o.ReconnectJitterTLS = jitterForTLS - return nil - } -} - -// CustomReconnectDelay is an Option to set the CustomReconnectDelayCB option. -// See CustomReconnectDelayCB Option for more details. -func CustomReconnectDelay(cb ReconnectDelayHandler) Option { - return func(o *Options) error { - o.CustomReconnectDelayCB = cb - return nil - } -} - -// PingInterval is an Option to set the period for client ping commands. -// Defaults to 2m. -func PingInterval(t time.Duration) Option { - return func(o *Options) error { - o.PingInterval = t - return nil - } -} - -// MaxPingsOutstanding is an Option to set the maximum number of ping requests -// that can go unanswered by the server before closing the connection. -// Defaults to 2. -func MaxPingsOutstanding(max int) Option { - return func(o *Options) error { - o.MaxPingsOut = max - return nil - } -} - -// ReconnectBufSize sets the buffer size of messages kept while busy reconnecting. -// Defaults to 8388608 bytes (8MB). It can be disabled by setting it to -1. 
-func ReconnectBufSize(size int) Option { - return func(o *Options) error { - o.ReconnectBufSize = size - return nil - } -} - -// Timeout is an Option to set the timeout for Dial on a connection. -// Defaults to 2s. -func Timeout(t time.Duration) Option { - return func(o *Options) error { - o.Timeout = t - return nil - } -} - -// FlusherTimeout is an Option to set the write (and flush) timeout on a connection. -func FlusherTimeout(t time.Duration) Option { - return func(o *Options) error { - o.FlusherTimeout = t - return nil - } -} - -// DrainTimeout is an Option to set the timeout for draining a connection. -// Defaults to 30s. -func DrainTimeout(t time.Duration) Option { - return func(o *Options) error { - o.DrainTimeout = t - return nil - } -} - -// DisconnectErrHandler is an Option to set the disconnected error handler. -func DisconnectErrHandler(cb ConnErrHandler) Option { - return func(o *Options) error { - o.DisconnectedErrCB = cb - return nil - } -} - -// DisconnectHandler is an Option to set the disconnected handler. -// DEPRECATED: Use DisconnectErrHandler. -func DisconnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.DisconnectedCB = cb - return nil - } -} - -// ConnectHandler is an Option to set the connected handler. -func ConnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ConnectedCB = cb - return nil - } -} - -// ReconnectHandler is an Option to set the reconnected handler. -func ReconnectHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ReconnectedCB = cb - return nil - } -} - -// ClosedHandler is an Option to set the closed handler. -func ClosedHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.ClosedCB = cb - return nil - } -} - -// DiscoveredServersHandler is an Option to set the new servers handler. 
-func DiscoveredServersHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.DiscoveredServersCB = cb - return nil - } -} - -// ErrorHandler is an Option to set the async error handler. -func ErrorHandler(cb ErrHandler) Option { - return func(o *Options) error { - o.AsyncErrorCB = cb - return nil - } -} - -// UserInfo is an Option to set the username and password to -// use when not included directly in the URLs. -func UserInfo(user, password string) Option { - return func(o *Options) error { - o.User = user - o.Password = password - return nil - } -} - -// Token is an Option to set the token to use -// when a token is not included directly in the URLs -// and when a token handler is not provided. -func Token(token string) Option { - return func(o *Options) error { - if o.TokenHandler != nil { - return ErrTokenAlreadySet - } - o.Token = token - return nil - } -} - -// TokenHandler is an Option to set the token handler to use -// when a token is not included directly in the URLs -// and when a token is not set. -func TokenHandler(cb AuthTokenHandler) Option { - return func(o *Options) error { - if o.Token != "" { - return ErrTokenAlreadySet - } - o.TokenHandler = cb - return nil - } -} - -// UserCredentials is a convenience function that takes a filename -// for a user's JWT and a filename for the user's private Nkey seed. -func UserCredentials(userOrChainedFile string, seedFiles ...string) Option { - userCB := func() (string, error) { - return userFromFile(userOrChainedFile) - } - var keyFile string - if len(seedFiles) > 0 { - keyFile = seedFiles[0] - } else { - keyFile = userOrChainedFile - } - sigCB := func(nonce []byte) ([]byte, error) { - return sigHandler(nonce, keyFile) - } - return UserJWT(userCB, sigCB) -} - -// UserJWTAndSeed is a convenience function that takes the JWT and seed -// values as strings. 
-func UserJWTAndSeed(jwt string, seed string) Option { - userCB := func() (string, error) { - return jwt, nil - } - - sigCB := func(nonce []byte) ([]byte, error) { - kp, err := nkeys.FromSeed([]byte(seed)) - if err != nil { - return nil, fmt.Errorf("unable to extract key pair from seed: %w", err) - } - // Wipe our key on exit. - defer kp.Wipe() - - sig, _ := kp.Sign(nonce) - return sig, nil - } - - return UserJWT(userCB, sigCB) -} - -// UserJWT will set the callbacks to retrieve the user's JWT and -// the signature callback to sign the server nonce. This an the Nkey -// option are mutually exclusive. -func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option { - return func(o *Options) error { - if userCB == nil { - return ErrNoUserCB - } - if sigCB == nil { - return ErrUserButNoSigCB - } - // Smoke test the user callback to ensure it is setup properly - // when processing options. - if _, err := userCB(); err != nil { - return err - } - - o.UserJWT = userCB - o.SignatureCB = sigCB - return nil - } -} - -// Nkey will set the public Nkey and the signature callback to -// sign the server nonce. -func Nkey(pubKey string, sigCB SignatureHandler) Option { - return func(o *Options) error { - o.Nkey = pubKey - o.SignatureCB = sigCB - if pubKey != "" && sigCB == nil { - return ErrNkeyButNoSigCB - } - return nil - } -} - -// SyncQueueLen will set the maximum queue len for the internal -// channel used for SubscribeSync(). -// Defaults to 65536. -func SyncQueueLen(max int) Option { - return func(o *Options) error { - o.SubChanLen = max - return nil - } -} - -// Dialer is an Option to set the dialer which will be used when -// attempting to establish a connection. -// DEPRECATED: Should use CustomDialer instead. -func Dialer(dialer *net.Dialer) Option { - return func(o *Options) error { - o.Dialer = dialer - return nil - } -} - -// SetCustomDialer is an Option to set a custom dialer which will be -// used when attempting to establish a connection. 
If both Dialer -// and CustomDialer are specified, CustomDialer takes precedence. -func SetCustomDialer(dialer CustomDialer) Option { - return func(o *Options) error { - o.CustomDialer = dialer - return nil - } -} - -// UseOldRequestStyle is an Option to force usage of the old Request style. -func UseOldRequestStyle() Option { - return func(o *Options) error { - o.UseOldRequestStyle = true - return nil - } -} - -// NoCallbacksAfterClientClose is an Option to disable callbacks when user code -// calls Close(). If close is initiated by any other condition, callbacks -// if any will be invoked. -func NoCallbacksAfterClientClose() Option { - return func(o *Options) error { - o.NoCallbacksAfterClientClose = true - return nil - } -} - -// LameDuckModeHandler sets the callback to invoke when the server notifies -// the connection that it entered lame duck mode, that is, going to -// gradually disconnect all its connections before shutting down. This is -// often used in deployments when upgrading NATS Servers. -func LameDuckModeHandler(cb ConnHandler) Option { - return func(o *Options) error { - o.LameDuckModeHandler = cb - return nil - } -} - -// RetryOnFailedConnect sets the connection in reconnecting state right away -// if it can't connect to a server in the initial set. -// See RetryOnFailedConnect option for more details. -func RetryOnFailedConnect(retry bool) Option { - return func(o *Options) error { - o.RetryOnFailedConnect = retry - return nil - } -} - -// Compression is an Option to indicate if this connection supports -// compression. Currently only supported for Websocket connections. -func Compression(enabled bool) Option { - return func(o *Options) error { - o.Compression = enabled - return nil - } -} - -// ProxyPath is an option for websocket connections that adds a path to connections url. -// This is useful when connecting to NATS behind a proxy. 
-func ProxyPath(path string) Option { - return func(o *Options) error { - o.ProxyPath = path - return nil - } -} - -// CustomInboxPrefix configures the request + reply inbox prefix -func CustomInboxPrefix(p string) Option { - return func(o *Options) error { - if p == "" || strings.Contains(p, ">") || strings.Contains(p, "*") || strings.HasSuffix(p, ".") { - return fmt.Errorf("nats: invalid custom prefix") - } - o.InboxPrefix = p - return nil - } -} - -// IgnoreAuthErrorAbort opts out of the default connect behavior of aborting -// subsequent reconnect attempts if server returns the same auth error twice. -func IgnoreAuthErrorAbort() Option { - return func(o *Options) error { - o.IgnoreAuthErrorAbort = true - return nil - } -} - -// SkipHostLookup is an Option to skip the host lookup when connecting to a server. -func SkipHostLookup() Option { - return func(o *Options) error { - o.SkipHostLookup = true - return nil - } -} - -// TLSHandshakeFirst is an Option to perform the TLS handshake first, that is -// before receiving the INFO protocol. This requires the server to also be -// configured with such option, otherwise the connection will fail. -func TLSHandshakeFirst() Option { - return func(o *Options) error { - o.TLSHandshakeFirst = true - o.Secure = true - return nil - } -} - -// Handler processing - -// SetDisconnectHandler will set the disconnect event handler. -// DEPRECATED: Use SetDisconnectErrHandler -func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.DisconnectedCB = dcb -} - -// SetDisconnectErrHandler will set the disconnect event handler. -func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.DisconnectedErrCB = dcb -} - -// DisconnectErrHandler will return the disconnect event handler. 
-func (nc *Conn) DisconnectErrHandler() ConnErrHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.DisconnectedErrCB -} - -// SetReconnectHandler will set the reconnect event handler. -func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.ReconnectedCB = rcb -} - -// ReconnectHandler will return the reconnect event handler. -func (nc *Conn) ReconnectHandler() ConnHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.ReconnectedCB -} - -// SetDiscoveredServersHandler will set the discovered servers handler. -func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.DiscoveredServersCB = dscb -} - -// DiscoveredServersHandler will return the discovered servers handler. -func (nc *Conn) DiscoveredServersHandler() ConnHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.DiscoveredServersCB -} - -// SetClosedHandler will set the closed event handler. -func (nc *Conn) SetClosedHandler(cb ConnHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.ClosedCB = cb -} - -// ClosedHandler will return the closed event handler. -func (nc *Conn) ClosedHandler() ConnHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.ClosedCB -} - -// SetErrorHandler will set the async error handler. -func (nc *Conn) SetErrorHandler(cb ErrHandler) { - if nc == nil { - return - } - nc.mu.Lock() - defer nc.mu.Unlock() - nc.Opts.AsyncErrorCB = cb -} - -// ErrorHandler will return the async error handler. -func (nc *Conn) ErrorHandler() ErrHandler { - if nc == nil { - return nil - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.Opts.AsyncErrorCB -} - -// Process the url string argument to Connect. 
-// Return an array of urls, even if only one. -func processUrlString(url string) []string { - urls := strings.Split(url, ",") - var j int - for _, s := range urls { - u := strings.TrimSpace(s) - if len(u) > 0 { - urls[j] = u - j++ - } - } - return urls[:j] -} - -// Connect will attempt to connect to a NATS server with multiple options. -func (o Options) Connect() (*Conn, error) { - nc := &Conn{Opts: o} - - // Some default options processing. - if nc.Opts.MaxPingsOut == 0 { - nc.Opts.MaxPingsOut = DefaultMaxPingOut - } - // Allow old default for channel length to work correctly. - if nc.Opts.SubChanLen == 0 { - nc.Opts.SubChanLen = DefaultMaxChanLen - } - // Default ReconnectBufSize - if nc.Opts.ReconnectBufSize == 0 { - nc.Opts.ReconnectBufSize = DefaultReconnectBufSize - } - // Ensure that Timeout is not 0 - if nc.Opts.Timeout == 0 { - nc.Opts.Timeout = DefaultTimeout - } - - // Check first for user jwt callback being defined and nkey. - if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" { - return nil, ErrNkeyAndUser - } - - // Check if we have an nkey but no signature callback defined. - if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil { - return nil, ErrNkeyButNoSigCB - } - - // Allow custom Dialer for connecting using a timeout by default - if nc.Opts.Dialer == nil { - nc.Opts.Dialer = &net.Dialer{ - Timeout: nc.Opts.Timeout, - } - } - - // If the TLSHandshakeFirst option is specified, make sure that - // the Secure boolean is true. - if nc.Opts.TLSHandshakeFirst { - nc.Opts.Secure = true - } - - if err := nc.setupServerPool(); err != nil { - return nil, err - } - - // Create the async callback handler. - nc.ach = &asyncCallbacksHandler{} - nc.ach.cond = sync.NewCond(&nc.ach.mu) - - // Set a default error handler that will print to stderr. 
- if nc.Opts.AsyncErrorCB == nil { - nc.Opts.AsyncErrorCB = defaultErrHandler - } - - // Create reader/writer - nc.newReaderWriter() - - connectionEstablished, err := nc.connect() - if err != nil { - return nil, err - } - - // Spin up the async cb dispatcher on success - go nc.ach.asyncCBDispatcher() - - if connectionEstablished && nc.Opts.ConnectedCB != nil { - nc.ach.push(func() { nc.Opts.ConnectedCB(nc) }) - } - - return nc, nil -} - -func defaultErrHandler(nc *Conn, sub *Subscription, err error) { - var cid uint64 - if nc != nil { - nc.mu.RLock() - cid = nc.info.CID - nc.mu.RUnlock() - } - var errStr string - if sub != nil { - var subject string - sub.mu.Lock() - if sub.jsi != nil { - subject = sub.jsi.psubj - } else { - subject = sub.Subject - } - sub.mu.Unlock() - errStr = fmt.Sprintf("%s on connection [%d] for subscription on %q\n", err.Error(), cid, subject) - } else { - errStr = fmt.Sprintf("%s on connection [%d]\n", err.Error(), cid) - } - os.Stderr.WriteString(errStr) -} - -const ( - _CRLF_ = "\r\n" - _EMPTY_ = "" - _SPC_ = " " - _PUB_P_ = "PUB " - _HPUB_P_ = "HPUB " -) - -var _CRLF_BYTES_ = []byte(_CRLF_) - -const ( - _OK_OP_ = "+OK" - _ERR_OP_ = "-ERR" - _PONG_OP_ = "PONG" - _INFO_OP_ = "INFO" -) - -const ( - connectProto = "CONNECT %s" + _CRLF_ - pingProto = "PING" + _CRLF_ - pongProto = "PONG" + _CRLF_ - subProto = "SUB %s %s %d" + _CRLF_ - unsubProto = "UNSUB %d %s" + _CRLF_ - okProto = _OK_OP_ + _CRLF_ -) - -// Return the currently selected server -func (nc *Conn) currentServer() (int, *srv) { - for i, s := range nc.srvPool { - if s == nil { - continue - } - if s == nc.current { - return i, s - } - } - return -1, nil -} - -// Pop the current server and put onto the end of the list. Select head of list as long -// as number of reconnect attempts under MaxReconnect. 
// selectNextServer rotates the server pool past the server just tried
// (nc.current) and returns the next candidate. The tried server is requeued
// at the end of the pool while it still has reconnect budget left
// (MaxReconnect < 0 means unlimited); otherwise it is dropped from the pool.
// Returns ErrNoServers when the pool becomes empty.
func (nc *Conn) selectNextServer() (*srv, error) {
	i, s := nc.currentServer()
	if i < 0 {
		return nil, ErrNoServers
	}
	sp := nc.srvPool
	num := len(sp)
	// Shift the remaining entries down over the vacated slot.
	copy(sp[i:num-1], sp[i+1:num])
	maxReconnect := nc.Opts.MaxReconnect
	if maxReconnect < 0 || s.reconnects < maxReconnect {
		// Requeue this server at the end of the list.
		nc.srvPool[num-1] = s
	} else {
		// Budget exhausted: shrink the pool, dropping this server.
		nc.srvPool = sp[0 : num-1]
	}
	if len(nc.srvPool) <= 0 {
		nc.current = nil
		return nil, ErrNoServers
	}
	nc.current = nc.srvPool[0]
	return nc.srvPool[0], nil
}

// Will assign the correct server to nc.current
func (nc *Conn) pickServer() error {
	nc.current = nil
	if len(nc.srvPool) <= 0 {
		return ErrNoServers
	}

	for _, s := range nc.srvPool {
		if s != nil {
			nc.current = s
			return nil
		}
	}
	return ErrNoServers
}

const tlsScheme = "tls"

// Create the server pool using the options given.
// We will place a Url option first, followed by any
// Server Options. We will randomize the server pool unless
// the NoRandomize flag is set.
func (nc *Conn) setupServerPool() error {
	nc.srvPool = make([]*srv, 0, srvPoolSize)
	nc.urls = make(map[string]struct{}, srvPoolSize)

	// Create srv objects from each url string in nc.Opts.Servers
	// and add them to the pool.
	for _, urlString := range nc.Opts.Servers {
		if err := nc.addURLToPool(urlString, false, false); err != nil {
			return err
		}
	}

	// Randomize if allowed to
	if !nc.Opts.NoRandomize {
		nc.shufflePool(0)
	}

	// Normally, if this one is set, Options.Servers should not be,
	// but we always allowed that, so continue to do so.
	if nc.Opts.Url != _EMPTY_ {
		// Add to the end of the array
		if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil {
			return err
		}
		// Then swap it with first to guarantee that Options.Url is tried first.
		last := len(nc.srvPool) - 1
		if last > 0 {
			nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0]
		}
	} else if len(nc.srvPool) <= 0 {
		// Place default URL if pool is empty.
		if err := nc.addURLToPool(DefaultURL, false, false); err != nil {
			return err
		}
	}

	// Check for Scheme hint to move to TLS mode.
	for _, srv := range nc.srvPool {
		if srv.url.Scheme == tlsScheme || srv.url.Scheme == wsSchemeTLS {
			// FIXME(dlc), this is for all in the pool, should be case by case.
			nc.Opts.Secure = true
			if nc.Opts.TLSConfig == nil {
				nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
			}
		}
	}

	return nc.pickServer()
}

// Helper function to return scheme
func (nc *Conn) connScheme() string {
	if nc.ws {
		if nc.Opts.Secure {
			return wsSchemeTLS
		}
		return wsScheme
	}
	if nc.Opts.Secure {
		return tlsScheme
	}
	return "nats"
}

// Return true iff u.Hostname() is an IP address.
func hostIsIP(u *url.URL) bool {
	return net.ParseIP(u.Hostname()) != nil
}

// addURLToPool adds an entry to the server pool.
// `implicit` marks servers discovered via the INFO protocol (as opposed to
// user-configured ones); `saveTLSName` asks to remember the currently
// connected hostname for TLS verification when the new URL is a bare IP.
func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error {
	if !strings.Contains(sURL, "://") {
		sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL)
	}
	var (
		u   *url.URL
		err error
	)
	// Up to two parse passes: the second one runs after a default port has
	// been appended when the URL had none.
	for i := 0; i < 2; i++ {
		u, err = url.Parse(sURL)
		if err != nil {
			return err
		}
		if u.Port() != "" {
			break
		}
		// In case given URL is of the form "localhost:", just add
		// the port number at the end, otherwise, add ":4222".
		if sURL[len(sURL)-1] != ':' {
			sURL += ":"
		}
		switch u.Scheme {
		case wsScheme:
			sURL += defaultWSPortString
		case wsSchemeTLS:
			sURL += defaultWSSPortString
		default:
			sURL += defaultPortString
		}
	}

	isWS := isWebsocketScheme(u)
	// We don't support mix and match of websocket and non websocket URLs.
	// If this is the first URL, then we accept and switch the global state
	// to websocket. After that, we will know how to reject mixed URLs.
	if len(nc.srvPool) == 0 {
		nc.ws = isWS
	} else if isWS && !nc.ws || !isWS && nc.ws {
		return fmt.Errorf("mixing of websocket and non websocket URLs is not allowed")
	}

	var tlsName string
	if implicit {
		curl := nc.current.url
		// Check to see if we do not have a url.User but current connected
		// url does. If so copy over.
		if u.User == nil && curl.User != nil {
			u.User = curl.User
		}
		// We are checking to see if we have a secure connection and are
		// adding an implicit server that just has an IP. If so we will remember
		// the current hostname we are connected to.
		if saveTLSName && hostIsIP(u) {
			tlsName = curl.Hostname()
		}
	}

	s := &srv{url: u, isImplicit: implicit, tlsName: tlsName}
	nc.srvPool = append(nc.srvPool, s)
	nc.urls[u.Host] = struct{}{}
	return nil
}

// shufflePool swaps randomly elements in the server pool
// The `offset` value indicates that the shuffling should start at
// this offset and leave the elements from [0..offset) intact.
func (nc *Conn) shufflePool(offset int) {
	if len(nc.srvPool) <= offset+1 {
		return
	}
	source := rand.NewSource(time.Now().UnixNano())
	r := rand.New(source)
	for i := offset; i < len(nc.srvPool); i++ {
		j := offset + r.Intn(i+1-offset)
		nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i]
	}
}

// newReaderWriter allocates the buffered reader/writer pair used for the
// socket. The reader starts with off == -1, meaning "no buffered data".
func (nc *Conn) newReaderWriter() {
	nc.br = &natsReader{
		buf: make([]byte, defaultBufSize),
		off: -1,
	}
	nc.bw = &natsWriter{
		limit:  defaultBufSize,
		plimit: nc.Opts.ReconnectBufSize,
	}
}

// bindToNewConn points the existing reader/writer at the newly established
// nc.conn and resets their buffered state.
func (nc *Conn) bindToNewConn() {
	bw := nc.bw
	bw.w, bw.bufs = nc.newWriter(), nil
	br := nc.br
	br.r, br.n, br.off = nc.conn, 0, -1
}

// newWriter returns nc.conn, wrapped with a write-timeout writer when
// FlusherTimeout is configured.
func (nc *Conn) newWriter() io.Writer {
	var w io.Writer = nc.conn
	if nc.Opts.FlusherTimeout > 0 {
		w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout}
	}
	return w
}

// appendString buffers a protocol string for a later flush.
func (w *natsWriter) appendString(str string) error {
	return w.appendBufs([]byte(str))
}

// appendBufs buffers the given byte slices. While a pending buffer is active
// (during reconnect) data goes there instead; otherwise, reaching the buffer
// limit triggers an immediate flush.
func (w *natsWriter) appendBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) == 0 {
			continue
		}
		if w.pending != nil {
			w.pending.Write(buf)
		} else {
			w.bufs = append(w.bufs, buf...)
		}
	}
	if w.pending == nil && len(w.bufs) >= w.limit {
		return w.flush()
	}
	return nil
}

// writeDirect writes the given strings straight to the underlying writer,
// bypassing the buffers (used during the (re)connect handshake).
func (w *natsWriter) writeDirect(strs ...string) error {
	for _, str := range strs {
		if _, err := w.w.Write([]byte(str)); err != nil {
			return err
		}
	}
	return nil
}

func (w *natsWriter) flush() error {
	// If a pending buffer is set, we don't flush. Code that needs to
	// write directly to the socket, by-passing buffers during (re)connect,
	// will use the writeDirect() API.
	if w.pending != nil {
		return nil
	}
	// Do not skip calling w.w.Write() here if len(w.bufs) is 0 because
	// the actual writer (if websocket for instance) may have things
	// to do such as sending control frames, etc..
	_, err := w.w.Write(w.bufs)
	w.bufs = w.bufs[:0]
	return err
}

// buffered reports how many bytes are currently buffered; the pending
// buffer takes precedence when active.
func (w *natsWriter) buffered() int {
	if w.pending != nil {
		return w.pending.Len()
	}
	return len(w.bufs)
}

// switchToPending redirects subsequent writes into an in-memory pending
// buffer, used while the connection is down.
func (w *natsWriter) switchToPending() {
	w.pending = new(bytes.Buffer)
}

// flushPendingBuffer writes the accumulated pending data to the socket.
func (w *natsWriter) flushPendingBuffer() error {
	if w.pending == nil || w.pending.Len() == 0 {
		return nil
	}
	_, err := w.w.Write(w.pending.Bytes())
	// Reset the pending buffer at this point because we don't want
	// to take the risk of sending duplicates or partials.
	w.pending.Reset()
	return err
}

// atLimitIfUsingPending reports whether the pending buffer has reached the
// configured ReconnectBufSize limit.
func (w *natsWriter) atLimitIfUsingPending() bool {
	if w.pending == nil {
		return false
	}
	return w.pending.Len() >= w.plimit
}

// doneWithPending drops the pending buffer, resuming normal buffered writes.
func (w *natsWriter) doneWithPending() {
	w.pending = nil
}

// Notify the reader that we are done with the connect, where "read" operations
// happen synchronously and under the connection lock. After this point, "read"
// will be happening from the read loop, without the connection lock.
//
// Note: this runs under the connection lock.
func (r *natsReader) doneWithConnect() {
	if wsr, ok := r.r.(*websocketReader); ok {
		wsr.doneWithConnect()
	}
}

// Read returns previously buffered data when available (r.off >= 0),
// otherwise it fills the internal buffer from the underlying reader.
func (r *natsReader) Read() ([]byte, error) {
	if r.off >= 0 {
		off := r.off
		r.off = -1
		return r.buf[off:r.n], nil
	}
	var err error
	r.n, err = r.r.Read(r.buf)
	return r.buf[:r.n], err
}

// ReadString reads until the first occurrence of delim, returning a string
// that includes the delimiter. Any data past the delimiter stays buffered
// for subsequent reads.
func (r *natsReader) ReadString(delim byte) (string, error) {
	var s string
build_string:
	// First look if we have something in the buffer
	if r.off >= 0 {
		i := bytes.IndexByte(r.buf[r.off:r.n], delim)
		if i >= 0 {
			end := r.off + i + 1
			s += string(r.buf[r.off:end])
			r.off = end
			if r.off >= r.n {
				r.off = -1
			}
			return s, nil
		}
		// We did not find the delim, so will have to read more.
		s += string(r.buf[r.off:r.n])
		r.off = -1
	}
	if _, err := r.Read(); err != nil {
		return s, err
	}
	r.off = 0
	goto build_string
}

// createConn will connect to the server and wrap the appropriate
// bufio structures. It will do the right thing when an existing
// connection is in place.
func (nc *Conn) createConn() (err error) {
	if nc.Opts.Timeout < 0 {
		return ErrBadTimeout
	}
	if _, cur := nc.currentServer(); cur == nil {
		return ErrNoServers
	}

	// If we have a reference to an in-process server then establish a
	// connection using that.
	if nc.Opts.InProcessServer != nil {
		conn, err := nc.Opts.InProcessServer.InProcessConn()
		if err != nil {
			return fmt.Errorf("failed to get in-process connection: %w", err)
		}
		nc.conn = conn
		nc.bindToNewConn()
		return nil
	}

	// We will auto-expand host names if they resolve to multiple IPs
	hosts := []string{}
	u := nc.current.url

	if !nc.Opts.SkipHostLookup && net.ParseIP(u.Hostname()) == nil {
		// Best-effort lookup: resolution errors are ignored and we fall
		// back to the host as given below.
		addrs, _ := net.LookupHost(u.Hostname())
		for _, addr := range addrs {
			hosts = append(hosts, net.JoinHostPort(addr, u.Port()))
		}
	}
	// Fall back to what we were given.
	if len(hosts) == 0 {
		hosts = append(hosts, u.Host)
	}

	// CustomDialer takes precedence. If not set, use Opts.Dialer which
	// is set to a default *net.Dialer (in Connect()) if not explicitly
	// set by the user.
	dialer := nc.Opts.CustomDialer
	if dialer == nil {
		// We will copy and shorten the timeout if we have multiple hosts to try.
		copyDialer := *nc.Opts.Dialer
		copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts))
		dialer = &copyDialer
	}

	if len(hosts) > 1 && !nc.Opts.NoRandomize {
		rand.Shuffle(len(hosts), func(i, j int) {
			hosts[i], hosts[j] = hosts[j], hosts[i]
		})
	}
	// Try each resolved address; keep the first successful connection.
	for _, host := range hosts {
		nc.conn, err = dialer.Dial("tcp", host)
		if err == nil {
			break
		}
	}
	if err != nil {
		return err
	}

	// If scheme starts with "ws" then branch out to websocket code.
	if isWebsocketScheme(u) {
		return nc.wsInitHandshake(u)
	}

	// Reset reader/writer to this new TCP connection
	nc.bindToNewConn()
	return nil
}

// skipTLSDialer is implemented by custom dialers that want to opt out of
// the client-side TLS wrapping done in makeTLSConn.
type skipTLSDialer interface {
	SkipTLSHandshake() bool
}

// makeTLSConn will wrap an existing Conn using TLS
func (nc *Conn) makeTLSConn() error {
	if nc.Opts.CustomDialer != nil {
		// we do nothing when asked to skip the TLS wrapper
		sd, ok := nc.Opts.CustomDialer.(skipTLSDialer)
		if ok && sd.SkipTLSHandshake() {
			return nil
		}
	}
	// Allow the user to configure their own tls.Config structure.
	tlsCopy := &tls.Config{}
	if nc.Opts.TLSConfig != nil {
		tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig)
	}
	if nc.Opts.TLSCertCB != nil {
		cert, err := nc.Opts.TLSCertCB()
		if err != nil {
			return err
		}
		tlsCopy.Certificates = []tls.Certificate{cert}
	}
	if nc.Opts.RootCAsCB != nil {
		rootCAs, err := nc.Opts.RootCAsCB()
		if err != nil {
			return err
		}
		tlsCopy.RootCAs = rootCAs
	}
	// If its blank we will override it with the current host
	if tlsCopy.ServerName == _EMPTY_ {
		if nc.current.tlsName != _EMPTY_ {
			tlsCopy.ServerName = nc.current.tlsName
		} else {
			h, _, _ := net.SplitHostPort(nc.current.url.Host)
			tlsCopy.ServerName = h
		}
	}
	nc.conn = tls.Client(nc.conn, tlsCopy)
	conn := nc.conn.(*tls.Conn)
	if err := conn.Handshake(); err != nil {
		return err
	}
	nc.bindToNewConn()
	return nil
}

// TLSConnectionState retrieves the state of the TLS connection to the server
func (nc *Conn) TLSConnectionState() (tls.ConnectionState, error) {
	if !nc.isConnected() {
		return tls.ConnectionState{}, ErrDisconnected
	}

	nc.mu.RLock()
	conn := nc.conn
	nc.mu.RUnlock()

	tc, ok := conn.(*tls.Conn)
	if !ok {
		return tls.ConnectionState{}, ErrConnectionNotTLS
	}

	return tc.ConnectionState(), nil
}

// waitForExits will wait for all socket watcher Go routines to
// be shutdown before proceeding.
func (nc *Conn) waitForExits() {
	// Kick old flusher forcefully.
	select {
	case nc.fch <- struct{}{}:
	default:
	}

	// Wait for any previous go routines.
	nc.wg.Wait()
}

// ConnectedUrl reports the connected server's URL
func (nc *Conn) ConnectedUrl() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.current.url.String()
}

// ConnectedUrlRedacted reports the connected server's URL with passwords redacted
func (nc *Conn) ConnectedUrlRedacted() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.current.url.Redacted()
}

// ConnectedAddr returns the connected server's IP
func (nc *Conn) ConnectedAddr() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.conn.RemoteAddr().String()
}

// ConnectedServerId reports the connected server's Id
func (nc *Conn) ConnectedServerId() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.ID
}

// ConnectedServerName reports the connected server's name
func (nc *Conn) ConnectedServerName() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Name
}

// semVerRe matches an optional leading "v" and up to three dotted numeric
// components (minor and patch are optional).
var semVerRe = regexp.MustCompile(`\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?`)

// versionComponents parses a semver-like string into its numeric parts.
// NOTE(review): minor/patch groups are optional in the regex, so a bare
// "1" yields empty m[2]/m[3] and Atoi then returns (-1,-1,-1, err) —
// callers appear to ignore the error; confirm this is intended.
func versionComponents(version string) (major, minor, patch int, err error) {
	m := semVerRe.FindStringSubmatch(version)
	if m == nil {
		return 0, 0, 0, errors.New("invalid semver")
	}
	major, err = strconv.Atoi(m[1])
	if err != nil {
		return -1, -1, -1, err
	}
	minor, err = strconv.Atoi(m[2])
	if err != nil {
		return -1, -1, -1, err
	}
	patch, err = strconv.Atoi(m[3])
	if err != nil {
		return -1, -1, -1, err
	}
	return major, minor, patch, err
}

// Check for minimum server requirement.
// serverMinVersion reports whether the connected server's version is at
// least major.minor.patch.
func (nc *Conn) serverMinVersion(major, minor, patch int) bool {
	smajor, sminor, spatch, _ := versionComponents(nc.ConnectedServerVersion())
	if smajor < major || (smajor == major && sminor < minor) || (smajor == major && sminor == minor && spatch < patch) {
		return false
	}
	return true
}

// ConnectedServerVersion reports the connected server's version as a string
func (nc *Conn) ConnectedServerVersion() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Version
}

// ConnectedClusterName reports the connected server's cluster name if any
func (nc *Conn) ConnectedClusterName() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Cluster
}

// Low level setup for structs, etc
func (nc *Conn) setup() {
	nc.subs = make(map[int64]*Subscription)
	nc.pongs = make([]chan struct{}, 0, 8)

	nc.fch = make(chan struct{}, flushChanSize)
	nc.rqch = make(chan struct{})

	// Setup scratch outbound buffer for PUB/HPUB
	pub := nc.scratch[:len(_HPUB_P_)]
	copy(pub, _HPUB_P_)
}

// Process a connected connection and initialize properly.
// Runs with the connection lock held; performs the INFO/CONNECT/PING
// handshake and then starts the readLoop and flusher goroutines.
func (nc *Conn) processConnectInit() error {

	// Set our deadline for the whole connect process
	nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout))
	defer nc.conn.SetDeadline(time.Time{})

	// Set our status to connecting.
	nc.changeConnStatus(CONNECTING)

	// If we need to have a TLS connection and want the TLS handshake to occur
	// first, do it now.
	if nc.Opts.Secure && nc.Opts.TLSHandshakeFirst {
		if err := nc.makeTLSConn(); err != nil {
			return err
		}
	}

	// Process the INFO protocol received from the server
	err := nc.processExpectedInfo()
	if err != nil {
		return err
	}

	// Send the CONNECT protocol along with the initial PING protocol.
	// Wait for the PONG response (or any error that we get from the server).
	err = nc.sendConnect()
	if err != nil {
		return err
	}

	// Reset the number of PING sent out
	nc.pout = 0

	// Start or reset Timer
	if nc.Opts.PingInterval > 0 {
		if nc.ptmr == nil {
			nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer)
		} else {
			nc.ptmr.Reset(nc.Opts.PingInterval)
		}
	}

	// Start the readLoop and flusher go routines, we will wait on both on a reconnect event.
	nc.wg.Add(2)
	go nc.readLoop()
	go nc.flusher()

	// Notify the reader that we are done with the connect handshake, where
	// reads were done synchronously and under the connection lock.
	nc.br.doneWithConnect()

	return nil
}

// Main connect function. Will connect to the nats-server.
// Returns whether a connection was actually established; with
// RetryOnFailedConnect the return may be (false, nil) while a background
// reconnect is in progress.
func (nc *Conn) connect() (bool, error) {
	var err error
	var connectionEstablished bool

	// Create actual socket connection
	// For first connect we walk all servers in the pool and try
	// to connect immediately.
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.initc = true
	// The pool may change inside the loop iteration due to INFO protocol.
	for i := 0; i < len(nc.srvPool); i++ {
		nc.current = nc.srvPool[i]

		if err = nc.createConn(); err == nil {
			// This was moved out of processConnectInit() because
			// that function is now invoked from doReconnect() too.
			nc.setup()

			err = nc.processConnectInit()

			if err == nil {
				nc.current.didConnect = true
				nc.current.reconnects = 0
				nc.current.lastErr = nil
				break
			} else {
				// nc.close() must be called without the lock held.
				nc.mu.Unlock()
				nc.close(DISCONNECTED, false, err)
				nc.mu.Lock()
				// Do not reset nc.current here since it would prevent
				// RetryOnFailedConnect to work should this be the last server
				// to try before starting doReconnect().
			}
		} else {
			// Cancel out default connection refused, will trigger the
			// No servers error conditional
			if strings.Contains(err.Error(), "connection refused") {
				err = nil
			}
		}
	}

	if err == nil && nc.status != CONNECTED {
		err = ErrNoServers
	}

	if err == nil {
		connectionEstablished = true
		nc.initc = false
	} else if nc.Opts.RetryOnFailedConnect {
		nc.setup()
		nc.changeConnStatus(RECONNECTING)
		nc.bw.switchToPending()
		go nc.doReconnect(ErrNoServers)
		err = nil
	} else {
		nc.current = nil
	}

	return connectionEstablished, err
}

// This will check to see if the connection should be
// secure. This can be dictated from either end and should
// only be called after the INIT protocol has been received.
func (nc *Conn) checkForSecure() error {
	// Check to see if we need to engage TLS
	o := nc.Opts

	// Check for mismatch in setups
	if o.Secure && !nc.info.TLSRequired && !nc.info.TLSAvailable {
		return ErrSecureConnWanted
	} else if nc.info.TLSRequired && !o.Secure {
		// Switch to Secure since server needs TLS.
		o.Secure = true
	}

	if o.Secure {
		// If TLS handshake first is true, we have already done
		// the handshake, so we are done here.
		if o.TLSHandshakeFirst {
			return nil
		}
		// Need to rewrap with bufio
		if err := nc.makeTLSConn(); err != nil {
			return err
		}
	}
	return nil
}

// processExpectedInfo will look for the expected first INFO message
// sent when a connection is established. The lock should be held entering.
func (nc *Conn) processExpectedInfo() error {

	c := &control{}

	// Read the protocol
	err := nc.readOp(c)
	if err != nil {
		return err
	}

	// The nats protocol should send INFO first always.
	if c.op != _INFO_OP_ {
		return ErrNoInfoReceived
	}

	// Parse the protocol
	if err := nc.processInfo(c.args); err != nil {
		return err
	}

	if nc.Opts.Nkey != "" && nc.info.Nonce == "" {
		return ErrNkeysNotSupported
	}

	// For websocket connections, we already switched to TLS if need be,
	// so we are done here.
	if nc.ws {
		return nil
	}

	return nc.checkForSecure()
}

// Sends a protocol control message by queuing into the bufio writer
// and kicking the flush Go routine. These writes are protected.
func (nc *Conn) sendProto(proto string) {
	nc.mu.Lock()
	nc.bw.appendString(proto)
	nc.kickFlusher()
	nc.mu.Unlock()
}

// Generate a connect protocol message, issuing user/password if
// applicable. The lock is assumed to be held upon entering.
func (nc *Conn) connectProto() (string, error) {
	o := nc.Opts
	var nkey, sig, user, pass, token, ujwt string
	u := nc.current.url.User
	if u != nil {
		// if no password, assume username is authToken
		if _, ok := u.Password(); !ok {
			token = u.Username()
		} else {
			user = u.Username()
			pass, _ = u.Password()
		}
	} else {
		// Take from options (possibly all empty strings)
		user = o.User
		pass = o.Password
		token = o.Token
		nkey = o.Nkey
	}

	// Look for user jwt.
	if o.UserJWT != nil {
		if jwt, err := o.UserJWT(); err != nil {
			return _EMPTY_, err
		} else {
			ujwt = jwt
		}
		if nkey != _EMPTY_ {
			return _EMPTY_, ErrNkeyAndUser
		}
	}

	if ujwt != _EMPTY_ || nkey != _EMPTY_ {
		if o.SignatureCB == nil {
			if ujwt == _EMPTY_ {
				return _EMPTY_, ErrNkeyButNoSigCB
			}
			return _EMPTY_, ErrUserButNoSigCB
		}
		// Sign the server-provided nonce to prove key ownership.
		sigraw, err := o.SignatureCB([]byte(nc.info.Nonce))
		if err != nil {
			return _EMPTY_, fmt.Errorf("error signing nonce: %w", err)
		}
		sig = base64.RawURLEncoding.EncodeToString(sigraw)
	}

	if nc.Opts.TokenHandler != nil {
		if token != _EMPTY_ {
			return _EMPTY_, ErrTokenAlreadySet
		}
		token = nc.Opts.TokenHandler()
	}

	// If our server does not support headers then we can't do them or no responders.
	hdrs := nc.info.Headers
	cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token,
		o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs}

	b, err := json.Marshal(cinfo)
	if err != nil {
		return _EMPTY_, ErrJsonParse
	}

	// Check if NoEcho is set and we have a server that supports it.
	if o.NoEcho && nc.info.Proto < 1 {
		return _EMPTY_, ErrNoEchoNotSupported
	}

	return fmt.Sprintf(connectProto, b), nil
}

// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes.
func normalizeErr(line string) string {
	s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_))
	s = strings.TrimLeft(strings.TrimRight(s, "'"), "'")
	return s
}

// natsProtoErr represents an -ERR protocol message sent by the server.
type natsProtoErr struct {
	description string
}

func (nerr *natsProtoErr) Error() string {
	return fmt.Sprintf("nats: %s", nerr.description)
}

func (nerr *natsProtoErr) Is(err error) bool {
	return strings.ToLower(nerr.Error()) == err.Error()
}

// Send a connect protocol message to the server, issue user/password if
// applicable. Will wait for a flush to return from the server for error
// processing.
func (nc *Conn) sendConnect() error {
	// Construct the CONNECT protocol string
	cProto, err := nc.connectProto()
	if err != nil {
		if !nc.initc && nc.Opts.AsyncErrorCB != nil {
			nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
		}
		return err
	}

	// Write the protocol and PING directly to the underlying writer.
	if err := nc.bw.writeDirect(cProto, pingProto); err != nil {
		return err
	}

	// We don't want to read more than we need here, otherwise
	// we would need to transfer the excess read data to the readLoop.
	// Since in normal situations we just are looking for a PONG\r\n,
	// reading byte-by-byte here is ok.
	proto, err := nc.readProto()
	if err != nil {
		if !nc.initc && nc.Opts.AsyncErrorCB != nil {
			nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
		}
		return err
	}

	// If opts.Verbose is set, handle +OK
	if nc.Opts.Verbose && proto == okProto {
		// Read the rest now...
		proto, err = nc.readProto()
		if err != nil {
			if !nc.initc && nc.Opts.AsyncErrorCB != nil {
				nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
			}
			return err
		}
	}

	// We expect a PONG
	if proto != pongProto {
		// But it could be something else, like -ERR

		// Since we no longer use ReadLine(), trim the trailing "\r\n"
		proto = strings.TrimRight(proto, "\r\n")

		// If it's a server error...
		if strings.HasPrefix(proto, _ERR_OP_) {
			// Remove -ERR, trim spaces and quotes, and convert to lower case.
			proto = normalizeErr(proto)

			// Check if this is an auth error
			if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil {
				// This will schedule an async error if we are in reconnect,
				// and keep track of the auth error for the current server.
				// If we have got the same error twice, this sets nc.ar to true to
				// indicate that the reconnect should be aborted (will be checked
				// in doReconnect()).
				nc.processAuthError(authErr)
			}
			return &natsProtoErr{proto}
		}

		// Notify that we got an unexpected protocol.
		return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto)
	}

	// This is where we are truly connected.
	nc.changeConnStatus(CONNECTED)

	return nil
}

// reads a protocol line.
func (nc *Conn) readProto() (string, error) {
	return nc.br.ReadString('\n')
}

// A control protocol line.
type control struct {
	op, args string
}

// Read a control line and process the intended op.
func (nc *Conn) readOp(c *control) error {
	line, err := nc.readProto()
	if err != nil {
		return err
	}
	parseControl(line, c)
	return nil
}

// Parse a control line from the server.
func parseControl(line string, c *control) {
	toks := strings.SplitN(line, _SPC_, 2)
	if len(toks) == 1 {
		c.op = strings.TrimSpace(toks[0])
		c.args = _EMPTY_
	} else if len(toks) == 2 {
		c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
	} else {
		// Defensive: SplitN with limit 2 yields at most 2 tokens, so this
		// branch should be unreachable.
		c.op = _EMPTY_
	}
}

// flushReconnectPendingItems will push the pending items that were
// gathered while we were in a RECONNECTING state to the socket.
func (nc *Conn) flushReconnectPendingItems() error {
	return nc.bw.flushPendingBuffer()
}

// Stops the ping timer if set.
// Connection lock is held on entry.
func (nc *Conn) stopPingTimer() {
	if nc.ptmr != nil {
		nc.ptmr.Stop()
	}
}

// Try to reconnect using the option parameters.
// This function assumes we are allowed to reconnect.
//
// Runs as its own goroutine. The connection lock is acquired manually and
// released/reacquired around sleeps and blocking operations, so the exact
// lock/unlock ordering below is load-bearing.
func (nc *Conn) doReconnect(err error) {
	// We want to make sure we have the other watchers shutdown properly
	// here before we proceed past this point.
	nc.waitForExits()

	// FIXME(dlc) - We have an issue here if we have
	// outstanding flush points (pongs) and they were not
	// sent out, but are still in the pipe.

	// Hold the lock manually and release where needed below,
	// can't do defer here.
	nc.mu.Lock()

	// Clear any errors.
	nc.err = nil
	// Perform appropriate callback if needed for a disconnect.
	// DisconnectedErrCB has priority over deprecated DisconnectedCB
	if !nc.initc {
		if nc.Opts.DisconnectedErrCB != nil {
			nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) })
		} else if nc.Opts.DisconnectedCB != nil {
			nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) })
		}
	}

	// This is used to wait on go routines exit if we start them in the loop
	// but an error occurs after that.
	waitForGoRoutines := false
	var rt *time.Timer
	// Channel used to kick routine out of sleep when conn is closed.
	rqch := nc.rqch
	// Counter that is increased when the whole list of servers has been tried.
	var wlf int

	var jitter time.Duration
	var rw time.Duration
	// If a custom reconnect delay handler is set, this takes precedence.
	crd := nc.Opts.CustomReconnectDelayCB
	if crd == nil {
		rw = nc.Opts.ReconnectWait
		// TODO: since we sleep only after the whole list has been tried, we can't
		// rely on individual *srv to know if it is a TLS or non-TLS url.
		// We have to pick which type of jitter to use, for now, we use these hints:
		jitter = nc.Opts.ReconnectJitter
		if nc.Opts.Secure || nc.Opts.TLSConfig != nil {
			jitter = nc.Opts.ReconnectJitterTLS
		}
	}

	for i := 0; len(nc.srvPool) > 0; {
		cur, err := nc.selectNextServer()
		if err != nil {
			nc.err = err
			break
		}

		// Sleep only once the whole pool has been tried.
		doSleep := i+1 >= len(nc.srvPool)
		nc.mu.Unlock()

		if !doSleep {
			i++
			// Release the lock to give a chance to a concurrent nc.Close() to break the loop.
			runtime.Gosched()
		} else {
			i = 0
			var st time.Duration
			if crd != nil {
				wlf++
				st = crd(wlf)
			} else {
				st = rw
				if jitter > 0 {
					st += time.Duration(rand.Int63n(int64(jitter)))
				}
			}
			// Reuse the timer across iterations instead of allocating one
			// per sleep.
			if rt == nil {
				rt = time.NewTimer(st)
			} else {
				rt.Reset(st)
			}
			select {
			case <-rqch:
				rt.Stop()
			case <-rt.C:
			}
		}
		// If the readLoop, etc.. go routines were started, wait for them to complete.
		if waitForGoRoutines {
			nc.waitForExits()
			waitForGoRoutines = false
		}
		nc.mu.Lock()

		// Check if we have been closed first.
		if nc.isClosed() {
			break
		}

		// Mark that we tried a reconnect
		cur.reconnects++

		// Try to create a new connection
		err = nc.createConn()

		// Not yet connected, retry...
		// Continue to hold the lock
		if err != nil {
			nc.err = nil
			continue
		}

		// We are reconnected
		nc.Reconnects++

		// Process connect logic
		if nc.err = nc.processConnectInit(); nc.err != nil {
			// Check if we should abort reconnect. If so, break out
			// of the loop and connection will be closed.
			if nc.ar {
				break
			}
			nc.changeConnStatus(RECONNECTING)
			continue
		}

		// Clear possible lastErr under the connection lock after
		// a successful processConnectInit().
		nc.current.lastErr = nil

		// Clear out server stats for the server we connected to..
		cur.didConnect = true
		cur.reconnects = 0

		// Send existing subscription state
		nc.resendSubscriptions()

		// Now send off and clear pending buffer
		nc.err = nc.flushReconnectPendingItems()
		if nc.err != nil {
			nc.changeConnStatus(RECONNECTING)
			// Stop the ping timer (if set)
			nc.stopPingTimer()
			// Since processConnectInit() returned without error, the
			// go routines were started, so wait for them to return
			// on the next iteration (after releasing the lock).
			waitForGoRoutines = true
			continue
		}

		// Done with the pending buffer
		nc.bw.doneWithPending()

		// This is where we are truly connected.
		nc.status = CONNECTED

		// If we are here with a retry on failed connect, indicate that the
		// initial connect is now complete.
		nc.initc = false

		// Queue up the reconnect callback.
		if nc.Opts.ReconnectedCB != nil {
			nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) })
		}

		// Release lock here, we will return below.
		nc.mu.Unlock()

		// Make sure to flush everything
		nc.Flush()

		return
	}

	// Call into close.. We have no servers left..
	if nc.err == nil {
		nc.err = ErrNoServers
	}
	nc.mu.Unlock()
	nc.close(CLOSED, true, nil)
}

// processOpErr handles errors from reading or parsing the protocol.
// The lock should not be held entering this function.
func (nc *Conn) processOpErr(err error) {
	nc.mu.Lock()
	if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() {
		nc.mu.Unlock()
		return
	}

	if nc.Opts.AllowReconnect && nc.status == CONNECTED {
		// Set our new status
		nc.changeConnStatus(RECONNECTING)
		// Stop ping timer if set
		nc.stopPingTimer()
		if nc.conn != nil {
			nc.conn.Close()
			nc.conn = nil
		}

		// Create pending buffer before reconnecting.
		nc.bw.switchToPending()

		// Clear any queued pongs, e.g. pending flush calls.
		nc.clearPendingFlushCalls()

		go nc.doReconnect(err)
		nc.mu.Unlock()
		return
	}

	nc.changeConnStatus(DISCONNECTED)
	nc.err = err
	nc.mu.Unlock()
	nc.close(CLOSED, true, nil)
}

// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) asyncCBDispatcher() {
	for {
		ac.mu.Lock()
		// Protect for spurious wakeups. We should get out of the
		// wait only if there is an element to pop from the list.
		for ac.head == nil {
			ac.cond.Wait()
		}
		cur := ac.head
		ac.head = cur.next
		if cur == ac.tail {
			ac.tail = nil
		}
		ac.mu.Unlock()

		// This signals that the dispatcher has been closed and all
		// previous callbacks have been dispatched.
		if cur.f == nil {
			return
		}
		// Invoke callback outside of handler's lock
		cur.f()
	}
}

// Add the given function to the tail of the list and
// signals the dispatcher.
func (ac *asyncCallbacksHandler) push(f func()) {
	ac.pushOrClose(f, false)
}

// Signals that we are closing...
func (ac *asyncCallbacksHandler) close() {
	ac.pushOrClose(nil, true)
}

// Add the given function to the tail of the list and
// signals the dispatcher.
func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	// Make sure that library is not calling push with nil function,
	// since this is used to notify the dispatcher that it should stop.
	if !close && f == nil {
		panic("pushing a nil callback")
	}
	cb := &asyncCB{f: f}
	if ac.tail != nil {
		ac.tail.next = cb
	} else {
		ac.head = cb
	}
	ac.tail = cb
	if close {
		// Wake every waiter so the close sentinel is observed.
		ac.cond.Broadcast()
	} else {
		ac.cond.Signal()
	}
}

// readLoop() will sit on the socket reading and processing the
// protocol from the server. It will dispatch appropriately based
// on the op type.
func (nc *Conn) readLoop() {
	// Release the wait group on exit
	defer nc.wg.Done()

	// Create a parseState if needed.
	nc.mu.Lock()
	if nc.ps == nil {
		nc.ps = &parseState{}
	}
	conn := nc.conn
	br := nc.br
	nc.mu.Unlock()

	if conn == nil {
		return
	}

	for {
		buf, err := br.Read()
		if err == nil {
			// With websocket, it is possible that there is no error but
			// also no buffer returned (either WS control message or read of a
			// partial compressed message). We could call parse(buf) which
			// would ignore an empty buffer, but simply go back to top of the loop.
			if len(buf) == 0 {
				continue
			}
			err = nc.parse(buf)
		}
		if err != nil {
			nc.processOpErr(err)
			break
		}
	}
	// Clear the parseState here..
	nc.mu.Lock()
	nc.ps = nil
	nc.mu.Unlock()
}

// waitForMsgs waits on the conditional shared with readLoop and processMsg.
// It is used to deliver messages to asynchronous subscribers.
func (nc *Conn) waitForMsgs(s *Subscription) {
	var closed bool
	var delivered, max uint64

	// Used to account for adjustments to sub.pBytes when we wrap back around.
	msgLen := -1

	for {
		s.mu.Lock()
		// Do accounting for last msg delivered here so we only lock once
		// and drain state trips after callback has returned.
		if msgLen >= 0 {
			s.pMsgs--
			s.pBytes -= msgLen
			msgLen = -1
		}

		if s.pHead == nil && !s.closed {
			s.pCond.Wait()
		}
		// Pop the msg off the list
		m := s.pHead
		if m != nil {
			s.pHead = m.next
			if s.pHead == nil {
				s.pTail = nil
			}
			if m.barrier != nil {
				s.mu.Unlock()
				// Fire the barrier callback once the last reference drops.
				if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
					m.barrier.f()
				}
				continue
			}
			msgLen = len(m.Data)
		}
		mcb := s.mcb
		max = s.max
		closed = s.closed
		var fcReply string
		if !s.closed {
			s.delivered++
			delivered = s.delivered
			if s.jsi != nil {
				fcReply = s.checkForFlowControlResponse()
			}
		}
		s.mu.Unlock()

		// Respond to flow control if applicable
		if fcReply != _EMPTY_ {
			nc.Publish(fcReply, nil)
		}

		if closed {
			break
		}

		// Deliver the message.
		if m != nil && (max == 0 || delivered <= max) {
			mcb(m)
		}
		// If we have hit the max for delivered msgs, remove sub.
		if max > 0 && delivered >= max {
			nc.mu.Lock()
			nc.removeSub(s)
			nc.mu.Unlock()
			break
		}
	}
	// Check for barrier messages
	s.mu.Lock()
	for m := s.pHead; m != nil; m = s.pHead {
		if m.barrier != nil {
			s.mu.Unlock()
			if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
				m.barrier.f()
			}
			s.mu.Lock()
		}
		s.pHead = m.next
	}
	// Now check for pDone
	done := s.pDone
	s.mu.Unlock()

	if done != nil {
		done(s.Subject)
	}
}

// Used for debugging and simulating loss for certain tests.
// Return what is to be used. If we return nil the message will be dropped.
type msgFilter func(m *Msg) *Msg

// processMsg is called by parse and will place the msg on the
// appropriate channel/pending queue for processing. If the channel is full,
// or the pending queue is over the pending limits, the connection is
// considered a slow consumer.
-func (nc *Conn) processMsg(data []byte) { - // Stats - atomic.AddUint64(&nc.InMsgs, 1) - atomic.AddUint64(&nc.InBytes, uint64(len(data))) - - // Don't lock the connection to avoid server cutting us off if the - // flusher is holding the connection lock, trying to send to the server - // that is itself trying to send data to us. - nc.subsMu.RLock() - sub := nc.subs[nc.ps.ma.sid] - var mf msgFilter - if nc.filters != nil { - mf = nc.filters[string(nc.ps.ma.subject)] - } - nc.subsMu.RUnlock() - - if sub == nil { - return - } - - // Copy them into string - subj := string(nc.ps.ma.subject) - reply := string(nc.ps.ma.reply) - - // Doing message create outside of the sub's lock to reduce contention. - // It's possible that we end-up not using the message, but that's ok. - - // FIXME(dlc): Need to copy, should/can do COW? - var msgPayload = data - if !nc.ps.msgCopied { - msgPayload = make([]byte, len(data)) - copy(msgPayload, data) - } - - // Check if we have headers encoded here. - var h Header - var err error - var ctrlMsg bool - var ctrlType int - var fcReply string - - if nc.ps.ma.hdr > 0 { - hbuf := msgPayload[:nc.ps.ma.hdr] - msgPayload = msgPayload[nc.ps.ma.hdr:] - h, err = DecodeHeadersMsg(hbuf) - if err != nil { - // We will pass the message through but send async error. - nc.mu.Lock() - nc.err = ErrBadHeaderMsg - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrBadHeaderMsg) }) - } - nc.mu.Unlock() - } - } - - // FIXME(dlc): Should we recycle these containers? - m := &Msg{ - Subject: subj, - Reply: reply, - Header: h, - Data: msgPayload, - Sub: sub, - wsz: len(data) + len(subj) + len(reply), - } - - // Check for message filters. - if mf != nil { - if m = mf(m); m == nil { - // Drop message. - return - } - } - - sub.mu.Lock() - - // Check if closed. - if sub.closed { - sub.mu.Unlock() - return - } - - // Skip flow control messages in case of using a JetStream context. 
- jsi := sub.jsi - if jsi != nil { - // There has to be a header for it to be a control message. - if h != nil { - ctrlMsg, ctrlType = isJSControlMessage(m) - if ctrlMsg && ctrlType == jsCtrlHB { - // Check if the heartbeat has a "Consumer Stalled" header, if - // so, the value is the FC reply to send a nil message to. - // We will send it at the end of this function. - fcReply = m.Header.Get(consumerStalledHdr) - } - } - // Check for ordered consumer here. If checkOrderedMsgs returns true that means it detected a gap. - if !ctrlMsg && jsi.ordered && sub.checkOrderedMsgs(m) { - sub.mu.Unlock() - return - } - } - - // Skip processing if this is a control message and - // if not a pull consumer heartbeat. For pull consumers, - // heartbeats have to be handled on per request basis. - if !ctrlMsg || (jsi != nil && jsi.pull) { - var chanSubCheckFC bool - // Subscription internal stats (applicable only for non ChanSubscription's) - if sub.typ != ChanSubscription { - sub.pMsgs++ - if sub.pMsgs > sub.pMsgsMax { - sub.pMsgsMax = sub.pMsgs - } - sub.pBytes += len(m.Data) - if sub.pBytes > sub.pBytesMax { - sub.pBytesMax = sub.pBytes - } - - // Check for a Slow Consumer - if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || - (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { - goto slowConsumer - } - } else if jsi != nil { - chanSubCheckFC = true - } - - // We have two modes of delivery. One is the channel, used by channel - // subscribers and syncSubscribers, the other is a linked list for async. - if sub.mch != nil { - select { - case sub.mch <- m: - default: - goto slowConsumer - } - } else { - // Push onto the async pList - if sub.pHead == nil { - sub.pHead = m - sub.pTail = m - if sub.pCond != nil { - sub.pCond.Signal() - } - } else { - sub.pTail.next = m - sub.pTail = m - } - } - if jsi != nil { - // Store the ACK metadata from the message to - // compare later on with the received heartbeat. 
- sub.trackSequences(m.Reply) - if chanSubCheckFC { - // For ChanSubscription, since we can't call this when a message - // is "delivered" (since user is pull from their own channel), - // we have a go routine that does this check, however, we do it - // also here to make it much more responsive. The go routine is - // really to avoid stalling when there is no new messages coming. - fcReply = sub.checkForFlowControlResponse() - } - } - } else if ctrlType == jsCtrlFC && m.Reply != _EMPTY_ { - // This is a flow control message. - // We will schedule the send of the FC reply once we have delivered the - // DATA message that was received before this flow control message, which - // has sequence `jsi.fciseq`. However, it is possible that this message - // has already been delivered, in that case, we need to send the FC reply now. - if sub.getJSDelivered() >= jsi.fciseq { - fcReply = m.Reply - } else { - // Schedule a reply after the previous message is delivered. - sub.scheduleFlowControlResponse(m.Reply) - } - } - - // Clear any SlowConsumer status. - sub.sc = false - sub.mu.Unlock() - - if fcReply != _EMPTY_ { - nc.Publish(fcReply, nil) - } - - // Handle control heartbeat messages. - if ctrlMsg && ctrlType == jsCtrlHB && m.Reply == _EMPTY_ { - nc.checkForSequenceMismatch(m, sub, jsi) - } - - return - -slowConsumer: - sub.dropped++ - sc := !sub.sc - sub.sc = true - // Undo stats from above - if sub.typ != ChanSubscription { - sub.pMsgs-- - sub.pBytes -= len(m.Data) - } - sub.mu.Unlock() - if sc { - // Now we need connection's lock and we may end-up in the situation - // that we were trying to avoid, except that in this case, the client - // is already experiencing client-side slow consumer situation. 
- nc.mu.Lock() - nc.err = ErrSlowConsumer - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) }) - } - nc.mu.Unlock() - } -} - -// processPermissionsViolation is called when the server signals a subject -// permissions violation on either publish or subscribe. -func (nc *Conn) processPermissionsViolation(err string) { - nc.mu.Lock() - // create error here so we can pass it as a closure to the async cb dispatcher. - e := errors.New("nats: " + err) - nc.err = e - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) }) - } - nc.mu.Unlock() -} - -// processAuthError generally processing for auth errors. We want to do retries -// unless we get the same error again. This allows us for instance to swap credentials -// and have the app reconnect, but if nothing is changing we should bail. -// This function will return true if the connection should be closed, false otherwise. -// Connection lock is held on entry -func (nc *Conn) processAuthError(err error) bool { - nc.err = err - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) - } - // We should give up if we tried twice on this server and got the - // same error. This behavior can be modified using IgnoreAuthErrorAbort. - if nc.current.lastErr == err && !nc.Opts.IgnoreAuthErrorAbort { - nc.ar = true - } else { - nc.current.lastErr = err - } - return nc.ar -} - -// flusher is a separate Go routine that will process flush requests for the write -// bufio. This allows coalescing of writes to the underlying socket. -func (nc *Conn) flusher() { - // Release the wait group - defer nc.wg.Done() - - // snapshot the bw and conn since they can change from underneath of us. 
- nc.mu.Lock() - bw := nc.bw - conn := nc.conn - fch := nc.fch - nc.mu.Unlock() - - if conn == nil || bw == nil { - return - } - - for { - if _, ok := <-fch; !ok { - return - } - nc.mu.Lock() - - // Check to see if we should bail out. - if !nc.isConnected() || nc.isConnecting() || conn != nc.conn { - nc.mu.Unlock() - return - } - if bw.buffered() > 0 { - if err := bw.flush(); err != nil { - if nc.err == nil { - nc.err = err - } - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) - } - } - } - nc.mu.Unlock() - } -} - -// processPing will send an immediate pong protocol response to the -// server. The server uses this mechanism to detect dead clients. -func (nc *Conn) processPing() { - nc.sendProto(pongProto) -} - -// processPong is used to process responses to the client's ping -// messages. We use pings for the flush mechanism as well. -func (nc *Conn) processPong() { - var ch chan struct{} - - nc.mu.Lock() - if len(nc.pongs) > 0 { - ch = nc.pongs[0] - nc.pongs = append(nc.pongs[:0], nc.pongs[1:]...) - } - nc.pout = 0 - nc.mu.Unlock() - if ch != nil { - ch <- struct{}{} - } -} - -// processOK is a placeholder for processing OK messages. -func (nc *Conn) processOK() { - // do nothing -} - -// processInfo is used to parse the info messages sent -// from the server. -// This function may update the server pool. -func (nc *Conn) processInfo(info string) error { - if info == _EMPTY_ { - return nil - } - var ncInfo serverInfo - if err := json.Unmarshal([]byte(info), &ncInfo); err != nil { - return err - } - - // Copy content into connection's info structure. - nc.info = ncInfo - // The array could be empty/not present on initial connect, - // if advertise is disabled on that server, or servers that - // did not include themselves in the async INFO protocol. - // If empty, do not remove the implicit servers from the pool. 
- if len(nc.info.ConnectURLs) == 0 { - if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil { - nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) }) - } - return nil - } - // Note about pool randomization: when the pool was first created, - // it was randomized (if allowed). We keep the order the same (removing - // implicit servers that are no longer sent to us). New URLs are sent - // to us in no specific order so don't need extra randomization. - hasNew := false - // This is what we got from the server we are connected to. - urls := nc.info.ConnectURLs - // Transform that to a map for easy lookups - tmp := make(map[string]struct{}, len(urls)) - for _, curl := range urls { - tmp[curl] = struct{}{} - } - // Walk the pool and removed the implicit servers that are no longer in the - // given array/map - sp := nc.srvPool - for i := 0; i < len(sp); i++ { - srv := sp[i] - curl := srv.url.Host - // Check if this URL is in the INFO protocol - _, inInfo := tmp[curl] - // Remove from the temp map so that at the end we are left with only - // new (or restarted) servers that need to be added to the pool. - delete(tmp, curl) - // Keep servers that were set through Options, but also the one that - // we are currently connected to (even if it is a discovered server). - if !srv.isImplicit || srv.url == nc.current.url { - continue - } - if !inInfo { - // Remove from server pool. Keep current order. - copy(sp[i:], sp[i+1:]) - nc.srvPool = sp[:len(sp)-1] - sp = nc.srvPool - i-- - } - } - // Figure out if we should save off the current non-IP hostname if we encounter a bare IP. - saveTLS := nc.current != nil && !hostIsIP(nc.current.url) - - // If there are any left in the tmp map, these are new (or restarted) servers - // and need to be added to the pool. - for curl := range tmp { - // Before adding, check if this is a new (as in never seen) URL. 
- // This is used to figure out if we invoke the DiscoveredServersCB - if _, present := nc.urls[curl]; !present { - hasNew = true - } - nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS) - } - if hasNew { - // Randomize the pool if allowed but leave the first URL in place. - if !nc.Opts.NoRandomize { - nc.shufflePool(1) - } - if !nc.initc && nc.Opts.DiscoveredServersCB != nil { - nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) }) - } - } - if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil { - nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) }) - } - return nil -} - -// processAsyncInfo does the same than processInfo, but is called -// from the parser. Calls processInfo under connection's lock -// protection. -func (nc *Conn) processAsyncInfo(info []byte) { - nc.mu.Lock() - // Ignore errors, we will simply not update the server pool... - nc.processInfo(string(info)) - nc.mu.Unlock() -} - -// LastError reports the last error encountered via the connection. -// It can be used reliably within ClosedCB in order to find out reason -// why connection was closed for example. -func (nc *Conn) LastError() error { - if nc == nil { - return ErrInvalidConnection - } - nc.mu.RLock() - err := nc.err - nc.mu.RUnlock() - return err -} - -// Check if the given error string is an auth error, and if so returns -// the corresponding ErrXXX error, nil otherwise -func checkAuthError(e string) error { - if strings.HasPrefix(e, AUTHORIZATION_ERR) { - return ErrAuthorization - } - if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) { - return ErrAuthExpired - } - if strings.HasPrefix(e, AUTHENTICATION_REVOKED_ERR) { - return ErrAuthRevoked - } - if strings.HasPrefix(e, ACCOUNT_AUTHENTICATION_EXPIRED_ERR) { - return ErrAccountAuthExpired - } - return nil -} - -// processErr processes any error messages from the server and -// sets the connection's LastError. 
-func (nc *Conn) processErr(ie string) { - // Trim, remove quotes - ne := normalizeErr(ie) - // convert to lower case. - e := strings.ToLower(ne) - - close := false - - // FIXME(dlc) - process Slow Consumer signals special. - if e == STALE_CONNECTION { - nc.processOpErr(ErrStaleConnection) - } else if e == MAX_CONNECTIONS_ERR { - nc.processOpErr(ErrMaxConnectionsExceeded) - } else if strings.HasPrefix(e, PERMISSIONS_ERR) { - nc.processPermissionsViolation(ne) - } else if authErr := checkAuthError(e); authErr != nil { - nc.mu.Lock() - close = nc.processAuthError(authErr) - nc.mu.Unlock() - } else { - close = true - nc.mu.Lock() - nc.err = errors.New("nats: " + ne) - nc.mu.Unlock() - } - if close { - nc.close(CLOSED, true, nil) - } -} - -// kickFlusher will send a bool on a channel to kick the -// flush Go routine to flush data to the server. -func (nc *Conn) kickFlusher() { - if nc.bw != nil { - select { - case nc.fch <- struct{}{}: - default: - } - } -} - -// Publish publishes the data argument to the given subject. The data -// argument is left untouched and needs to be correctly interpreted on -// the receiver. -func (nc *Conn) Publish(subj string, data []byte) error { - return nc.publish(subj, _EMPTY_, nil, data) -} - -// Header represents the optional Header for a NATS message, -// based on the implementation of http.Header. -type Header map[string][]string - -// Add adds the key, value pair to the header. It is case-sensitive -// and appends to any existing values associated with key. -func (h Header) Add(key, value string) { - h[key] = append(h[key], value) -} - -// Set sets the header entries associated with key to the single -// element value. It is case-sensitive and replaces any existing -// values associated with key. -func (h Header) Set(key, value string) { - h[key] = []string{value} -} - -// Get gets the first value associated with the given key. -// It is case-sensitive. 
-func (h Header) Get(key string) string { - if h == nil { - return _EMPTY_ - } - if v := h[key]; v != nil { - return v[0] - } - return _EMPTY_ -} - -// Values returns all values associated with the given key. -// It is case-sensitive. -func (h Header) Values(key string) []string { - return h[key] -} - -// Del deletes the values associated with a key. -// It is case-sensitive. -func (h Header) Del(key string) { - delete(h, key) -} - -// NewMsg creates a message for publishing that will use headers. -func NewMsg(subject string) *Msg { - return &Msg{ - Subject: subject, - Header: make(Header), - } -} - -const ( - hdrLine = "NATS/1.0\r\n" - crlf = "\r\n" - hdrPreEnd = len(hdrLine) - len(crlf) - statusHdr = "Status" - descrHdr = "Description" - lastConsumerSeqHdr = "Nats-Last-Consumer" - lastStreamSeqHdr = "Nats-Last-Stream" - consumerStalledHdr = "Nats-Consumer-Stalled" - noResponders = "503" - noMessagesSts = "404" - reqTimeoutSts = "408" - jetStream409Sts = "409" - controlMsg = "100" - statusLen = 3 // e.g. 20x, 40x, 50x -) - -// DecodeHeadersMsg will decode and headers. -func DecodeHeadersMsg(data []byte) (Header, error) { - br := bufio.NewReaderSize(bytes.NewReader(data), 128) - tp := textproto.NewReader(br) - l, err := tp.ReadLine() - if err != nil || len(l) < hdrPreEnd || l[:hdrPreEnd] != hdrLine[:hdrPreEnd] { - return nil, ErrBadHeaderMsg - } - - mh, err := readMIMEHeader(tp) - if err != nil { - return nil, err - } - - // Check if we have an inlined status. - if len(l) > hdrPreEnd { - var description string - status := strings.TrimSpace(l[hdrPreEnd:]) - if len(status) != statusLen { - description = strings.TrimSpace(status[statusLen:]) - status = status[:statusLen] - } - mh.Add(statusHdr, status) - if len(description) > 0 { - mh.Add(descrHdr, description) - } - } - return Header(mh), nil -} - -// readMIMEHeader returns a MIMEHeader that preserves the -// original case of the MIME header, based on the implementation -// of textproto.ReadMIMEHeader. 
-// -// https://golang.org/pkg/net/textproto/#Reader.ReadMIMEHeader -func readMIMEHeader(tp *textproto.Reader) (textproto.MIMEHeader, error) { - m := make(textproto.MIMEHeader) - for { - kv, err := tp.ReadLine() - if len(kv) == 0 { - return m, err - } - - // Process key fetching original case. - i := bytes.IndexByte([]byte(kv), ':') - if i < 0 { - return nil, ErrBadHeaderMsg - } - key := kv[:i] - if key == "" { - // Skip empty keys. - continue - } - i++ - for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') { - i++ - } - value := string(kv[i:]) - m[key] = append(m[key], value) - if err != nil { - return m, err - } - } -} - -// PublishMsg publishes the Msg structure, which includes the -// Subject, an optional Reply and an optional Data field. -func (nc *Conn) PublishMsg(m *Msg) error { - if m == nil { - return ErrInvalidMsg - } - hdr, err := m.headerBytes() - if err != nil { - return err - } - return nc.publish(m.Subject, m.Reply, hdr, m.Data) -} - -// PublishRequest will perform a Publish() expecting a response on the -// reply subject. Use Request() for automatically waiting for a response -// inline. -func (nc *Conn) PublishRequest(subj, reply string, data []byte) error { - return nc.publish(subj, reply, nil, data) -} - -// Used for handrolled Itoa -const digits = "0123456789" - -// publish is the internal function to publish messages to a nats-server. -// Sends a protocol data message by queuing into the bufio writer -// and kicking the flush go routine. These writes should be protected. -func (nc *Conn) publish(subj, reply string, hdr, data []byte) error { - if nc == nil { - return ErrInvalidConnection - } - if subj == "" { - return ErrBadSubject - } - nc.mu.Lock() - - // Check if headers attempted to be sent to server that does not support them. 
- if len(hdr) > 0 && !nc.info.Headers { - nc.mu.Unlock() - return ErrHeadersNotSupported - } - - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - - if nc.isDrainingPubs() { - nc.mu.Unlock() - return ErrConnectionDraining - } - - // Proactively reject payloads over the threshold set by server. - msgSize := int64(len(data) + len(hdr)) - // Skip this check if we are not yet connected (RetryOnFailedConnect) - if !nc.initc && msgSize > nc.info.MaxPayload { - nc.mu.Unlock() - return ErrMaxPayload - } - - // Check if we are reconnecting, and if so check if - // we have exceeded our reconnect outbound buffer limits. - if nc.bw.atLimitIfUsingPending() { - nc.mu.Unlock() - return ErrReconnectBufExceeded - } - - var mh []byte - if hdr != nil { - mh = nc.scratch[:len(_HPUB_P_)] - } else { - mh = nc.scratch[1:len(_HPUB_P_)] - } - mh = append(mh, subj...) - mh = append(mh, ' ') - if reply != "" { - mh = append(mh, reply...) - mh = append(mh, ' ') - } - - // We could be smarter here, but simple loop is ok, - // just avoid strconv in fast path. - // FIXME(dlc) - Find a better way here. - // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) - // go 1.14 some values strconv faster, may be able to switch over. - - var b [12]byte - var i = len(b) - - if hdr != nil { - if len(hdr) > 0 { - for l := len(hdr); l > 0; l /= 10 { - i-- - b[i] = digits[l%10] - } - } else { - i-- - b[i] = digits[0] - } - mh = append(mh, b[i:]...) - mh = append(mh, ' ') - // reset for below. - i = len(b) - } - - if msgSize > 0 { - for l := msgSize; l > 0; l /= 10 { - i-- - b[i] = digits[l%10] - } - } else { - i-- - b[i] = digits[0] - } - - mh = append(mh, b[i:]...) - mh = append(mh, _CRLF_...) 
- - if err := nc.bw.appendBufs(mh, hdr, data, _CRLF_BYTES_); err != nil { - nc.mu.Unlock() - return err - } - - nc.OutMsgs++ - nc.OutBytes += uint64(len(data) + len(hdr)) - - if len(nc.fch) == 0 { - nc.kickFlusher() - } - nc.mu.Unlock() - return nil -} - -// respHandler is the global response handler. It will look up -// the appropriate channel based on the last token and place -// the message on the channel if possible. -func (nc *Conn) respHandler(m *Msg) { - nc.mu.Lock() - - // Just return if closed. - if nc.isClosed() { - nc.mu.Unlock() - return - } - - var mch chan *Msg - - // Grab mch - rt := nc.respToken(m.Subject) - if rt != _EMPTY_ { - mch = nc.respMap[rt] - // Delete the key regardless, one response only. - delete(nc.respMap, rt) - } else if len(nc.respMap) == 1 { - // If the server has rewritten the subject, the response token (rt) - // will not match (could be the case with JetStream). If that is the - // case and there is a single entry, use that. - for k, v := range nc.respMap { - mch = v - delete(nc.respMap, k) - break - } - } - nc.mu.Unlock() - - // Don't block, let Request timeout instead, mch is - // buffered and we should delete the key before a - // second response is processed. - select { - case mch <- m: - default: - return - } -} - -// Helper to setup and send new request style requests. Return the chan to receive the response. -func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Msg, string, error) { - nc.mu.Lock() - // Do setup for the new style if needed. - if nc.respMap == nil { - nc.initNewResp() - } - // Create new literal Inbox and map to a chan msg. - mch := make(chan *Msg, RequestChanLen) - respInbox := nc.newRespInbox() - token := respInbox[nc.respSubLen:] - - nc.respMap[token] = mch - if nc.respMux == nil { - // Create the response subscription we will use for all new style responses. - // This will be on an _INBOX with an additional terminal token. The subscription - // will be on a wildcard. 
- s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false, nil) - if err != nil { - nc.mu.Unlock() - return nil, token, err - } - nc.respScanf = strings.Replace(nc.respSub, "*", "%s", -1) - nc.respMux = s - } - nc.mu.Unlock() - - if err := nc.publish(subj, respInbox, hdr, data); err != nil { - return nil, token, err - } - - return mch, token, nil -} - -// RequestMsg will send a request payload including optional headers and deliver -// the response message, or an error, including a timeout if no message was received properly. -func (nc *Conn) RequestMsg(msg *Msg, timeout time.Duration) (*Msg, error) { - if msg == nil { - return nil, ErrInvalidMsg - } - hdr, err := msg.headerBytes() - if err != nil { - return nil, err - } - - return nc.request(msg.Subject, hdr, msg.Data, timeout) -} - -// Request will send a request payload and deliver the response message, -// or an error, including a timeout if no message was received properly. -func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { - return nc.request(subj, nil, data, timeout) -} - -func (nc *Conn) useOldRequestStyle() bool { - nc.mu.RLock() - r := nc.Opts.UseOldRequestStyle - nc.mu.RUnlock() - return r -} - -func (nc *Conn) request(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - - var m *Msg - var err error - - if nc.useOldRequestStyle() { - m, err = nc.oldRequest(subj, hdr, data, timeout) - } else { - m, err = nc.newRequest(subj, hdr, data, timeout) - } - - // Check for no responder status. 
- if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { - m, err = nil, ErrNoResponders - } - return m, err -} - -func (nc *Conn) newRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { - mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) - if err != nil { - return nil, err - } - - t := globalTimerPool.Get(timeout) - defer globalTimerPool.Put(t) - - var ok bool - var msg *Msg - - select { - case msg, ok = <-mch: - if !ok { - return nil, ErrConnectionClosed - } - case <-t.C: - nc.mu.Lock() - delete(nc.respMap, token) - nc.mu.Unlock() - return nil, ErrTimeout - } - - return msg, nil -} - -// oldRequest will create an Inbox and perform a Request() call -// with the Inbox reply and return the first reply received. -// This is optimized for the case of multiple responses. -func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { - inbox := nc.NewInbox() - ch := make(chan *Msg, RequestChanLen) - - s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil) - if err != nil { - return nil, err - } - s.AutoUnsubscribe(1) - defer s.Unsubscribe() - - err = nc.publish(subj, inbox, hdr, data) - if err != nil { - return nil, err - } - - return s.NextMsg(timeout) -} - -// InboxPrefix is the prefix for all inbox subjects. -const ( - InboxPrefix = "_INBOX." - inboxPrefixLen = len(InboxPrefix) - replySuffixLen = 8 // Gives us 62^8 - rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - base = 62 -) - -// NewInbox will return an inbox string which can be used for directed replies from -// subscribers. These are guaranteed to be unique, but can be shared and subscribed -// to by others. -func NewInbox() string { - var b [inboxPrefixLen + nuidSize]byte - pres := b[:inboxPrefixLen] - copy(pres, InboxPrefix) - ns := b[inboxPrefixLen:] - copy(ns, nuid.Next()) - return string(b[:]) -} - -// Create a new inbox that is prefix aware. 
-func (nc *Conn) NewInbox() string { - if nc.Opts.InboxPrefix == _EMPTY_ { - return NewInbox() - } - - var sb strings.Builder - sb.WriteString(nc.Opts.InboxPrefix) - sb.WriteByte('.') - sb.WriteString(nuid.Next()) - return sb.String() -} - -// Function to init new response structures. -func (nc *Conn) initNewResp() { - nc.respSubPrefix = fmt.Sprintf("%s.", nc.NewInbox()) - nc.respSubLen = len(nc.respSubPrefix) - nc.respSub = fmt.Sprintf("%s*", nc.respSubPrefix) - nc.respMap = make(map[string]chan *Msg) - nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano())) -} - -// newRespInbox creates a new literal response subject -// that will trigger the mux subscription handler. -// Lock should be held. -func (nc *Conn) newRespInbox() string { - if nc.respMap == nil { - nc.initNewResp() - } - - var sb strings.Builder - sb.WriteString(nc.respSubPrefix) - - rn := nc.respRand.Int63() - for i := 0; i < replySuffixLen; i++ { - sb.WriteByte(rdigits[rn%base]) - rn /= base - } - - return sb.String() -} - -// NewRespInbox is the new format used for _INBOX. -func (nc *Conn) NewRespInbox() string { - nc.mu.Lock() - s := nc.newRespInbox() - nc.mu.Unlock() - return s -} - -// respToken will return the last token of a literal response inbox -// which we use for the message channel lookup. This needs to do a -// scan to protect itself against the server changing the subject. -// Lock should be held. -func (nc *Conn) respToken(respInbox string) string { - var token string - n, err := fmt.Sscanf(respInbox, nc.respScanf, &token) - if err != nil || n != 1 { - return "" - } - return token -} - -// Subscribe will express interest in the given subject. The subject -// can have wildcards. -// There are two type of wildcards: * for partial, and > for full. -// A subscription on subject time.*.east would receive messages sent to time.us.east and time.eu.east. 
-// A subscription on subject time.us.> would receive messages sent to -// time.us.east and time.us.east.atlanta, while time.us.* would only match time.us.east -// since it can't match more than one token. -// Messages will be delivered to the associated MsgHandler. -func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { - return nc.subscribe(subj, _EMPTY_, cb, nil, false, nil) -} - -// ChanSubscribe will express interest in the given subject and place -// all messages received on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, _EMPTY_, nil, ch, false, nil) -} - -// ChanQueueSubscribe will express interest in the given subject. -// All subscribers with the same queue name will form the queue group -// and only one member of the group will be selected to receive any given message, -// which will be placed on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -// Note: This is the same than QueueSubscribeSyncWithChan. -func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, group, nil, ch, false, nil) -} - -// SubscribeSync will express interest on the given subject. Messages will -// be received synchronously using Subscription.NextMsg(). -func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - mch := make(chan *Msg, nc.Opts.SubChanLen) - return nc.subscribe(subj, _EMPTY_, nil, mch, true, nil) -} - -// QueueSubscribe creates an asynchronous queue subscriber on the given subject. -// All subscribers with the same queue name will form the queue group and -// only one member of the group will be selected to receive any given -// message asynchronously. 
-func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { - return nc.subscribe(subj, queue, cb, nil, false, nil) -} - -// QueueSubscribeSync creates a synchronous queue subscriber on the given -// subject. All subscribers with the same queue name will form the queue -// group and only one member of the group will be selected to receive any -// given message synchronously using Subscription.NextMsg(). -func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { - mch := make(chan *Msg, nc.Opts.SubChanLen) - return nc.subscribe(subj, queue, nil, mch, true, nil) -} - -// QueueSubscribeSyncWithChan will express interest in the given subject. -// All subscribers with the same queue name will form the queue group -// and only one member of the group will be selected to receive any given message, -// which will be placed on the channel. -// You should not close the channel until sub.Unsubscribe() has been called. -// Note: This is the same than ChanQueueSubscribe. -func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { - return nc.subscribe(subj, queue, nil, ch, false, nil) -} - -// badSubject will do quick test on whether a subject is acceptable. -// Spaces are not allowed and all tokens should be > 0 in len. -func badSubject(subj string) bool { - if strings.ContainsAny(subj, " \t\r\n") { - return true - } - tokens := strings.Split(subj, ".") - for _, t := range tokens { - if len(t) == 0 { - return true - } - } - return false -} - -// badQueue will check a queue name for whitespace. -func badQueue(qname string) bool { - return strings.ContainsAny(qname, " \t\r\n") -} - -// subscribe is the internal subscribe function that indicates interest in a subject. 
-func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - nc.mu.Lock() - defer nc.mu.Unlock() - return nc.subscribeLocked(subj, queue, cb, ch, isSync, js) -} - -func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) { - if nc == nil { - return nil, ErrInvalidConnection - } - if badSubject(subj) { - return nil, ErrBadSubject - } - if queue != _EMPTY_ && badQueue(queue) { - return nil, ErrBadQueueName - } - - // Check for some error conditions. - if nc.isClosed() { - return nil, ErrConnectionClosed - } - if nc.isDraining() { - return nil, ErrConnectionDraining - } - - if cb == nil && ch == nil { - return nil, ErrBadSubscription - } - - sub := &Subscription{ - Subject: subj, - Queue: queue, - mcb: cb, - conn: nc, - jsi: js, - } - // Set pending limits. - if ch != nil { - sub.pMsgsLimit = cap(ch) - } else { - sub.pMsgsLimit = DefaultSubPendingMsgsLimit - } - sub.pBytesLimit = DefaultSubPendingBytesLimit - - // If we have an async callback, start up a sub specific - // Go routine to deliver the messages. - var sr bool - if cb != nil { - sub.typ = AsyncSubscription - sub.pCond = sync.NewCond(&sub.mu) - sr = true - } else if !isSync { - sub.typ = ChanSubscription - sub.mch = ch - } else { // Sync Subscription - sub.typ = SyncSubscription - sub.mch = ch - } - - nc.subsMu.Lock() - nc.ssid++ - sub.sid = nc.ssid - nc.subs[sub.sid] = sub - nc.subsMu.Unlock() - - // Let's start the go routine now that it is fully setup and registered. - if sr { - go nc.waitForMsgs(sub) - } - - // We will send these for all subs when we reconnect - // so that we can suppress here if reconnecting. - if !nc.isReconnecting() { - nc.bw.appendString(fmt.Sprintf(subProto, subj, queue, sub.sid)) - nc.kickFlusher() - } - - return sub, nil -} - -// NumSubscriptions returns active number of subscriptions. 
-func (nc *Conn) NumSubscriptions() int { - nc.mu.RLock() - defer nc.mu.RUnlock() - return len(nc.subs) -} - -// Lock for nc should be held here upon entry -func (nc *Conn) removeSub(s *Subscription) { - nc.subsMu.Lock() - delete(nc.subs, s.sid) - nc.subsMu.Unlock() - s.mu.Lock() - defer s.mu.Unlock() - // Release callers on NextMsg for SyncSubscription only - if s.mch != nil && s.typ == SyncSubscription { - close(s.mch) - } - s.mch = nil - - // If JS subscription then stop HB timer. - if jsi := s.jsi; jsi != nil { - if jsi.hbc != nil { - jsi.hbc.Stop() - jsi.hbc = nil - } - if jsi.csfct != nil { - jsi.csfct.Stop() - jsi.csfct = nil - } - } - - if s.typ != AsyncSubscription { - done := s.pDone - if done != nil { - done(s.Subject) - } - } - // Mark as invalid - s.closed = true - if s.pCond != nil { - s.pCond.Broadcast() - } -} - -// SubscriptionType is the type of the Subscription. -type SubscriptionType int - -// The different types of subscription types. -const ( - AsyncSubscription = SubscriptionType(iota) - SyncSubscription - ChanSubscription - NilSubscription - PullSubscription -) - -// Type returns the type of Subscription. -func (s *Subscription) Type() SubscriptionType { - if s == nil { - return NilSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - // Pull subscriptions are really a SyncSubscription and we want this - // type to be set internally for all delivered messages management, etc.. - // So check when to return PullSubscription to the user. - if s.jsi != nil && s.jsi.pull { - return PullSubscription - } - return s.typ -} - -// IsValid returns a boolean indicating whether the subscription -// is still active. This will return false if the subscription has -// already been closed. -func (s *Subscription) IsValid() bool { - if s == nil { - return false - } - s.mu.Lock() - defer s.mu.Unlock() - return s.conn != nil && !s.closed -} - -// Drain will remove interest but continue callbacks until all messages -// have been processed. 
-// -// For a JetStream subscription, if the library has created the JetStream -// consumer, the library will send a DeleteConsumer request to the server -// when the Drain operation completes. If a failure occurs when deleting -// the JetStream consumer, an error will be reported to the asynchronous -// error callback. -// If you do not wish the JetStream consumer to be automatically deleted, -// ensure that the consumer is not created by the library, which means -// create the consumer with AddConsumer and bind to this consumer. -func (s *Subscription) Drain() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - s.mu.Unlock() - if conn == nil { - return ErrBadSubscription - } - return conn.unsubscribe(s, 0, true) -} - -// Unsubscribe will remove interest in the given subject. -// -// For a JetStream subscription, if the library has created the JetStream -// consumer, it will send a DeleteConsumer request to the server (if the -// unsubscribe itself was successful). If the delete operation fails, the -// error will be returned. -// If you do not wish the JetStream consumer to be automatically deleted, -// ensure that the consumer is not created by the library, which means -// create the consumer with AddConsumer and bind to this consumer (using -// the nats.Bind() option). -func (s *Subscription) Unsubscribe() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - closed := s.closed - dc := s.jsi != nil && s.jsi.dc - s.mu.Unlock() - if conn == nil || conn.IsClosed() { - return ErrConnectionClosed - } - if closed { - return ErrBadSubscription - } - if conn.IsDraining() { - return ErrConnectionDraining - } - err := conn.unsubscribe(s, 0, false) - if err == nil && dc { - err = s.deleteConsumer() - } - return err -} - -// checkDrained will watch for a subscription to be fully drained -// and then remove it. 
-func (nc *Conn) checkDrained(sub *Subscription) { - if nc == nil || sub == nil { - return - } - - // This allows us to know that whatever we have in the client pending - // is correct and the server will not send additional information. - nc.Flush() - - sub.mu.Lock() - // For JS subscriptions, check if we are going to delete the - // JS consumer when drain completes. - dc := sub.jsi != nil && sub.jsi.dc - sub.mu.Unlock() - - // Once we are here we just wait for Pending to reach 0 or - // any other state to exit this go routine. - for { - // check connection is still valid. - if nc.IsClosed() { - return - } - - // Check subscription state - sub.mu.Lock() - conn := sub.conn - closed := sub.closed - pMsgs := sub.pMsgs - sub.mu.Unlock() - - if conn == nil || closed || pMsgs == 0 { - nc.mu.Lock() - nc.removeSub(sub) - nc.mu.Unlock() - if dc { - if err := sub.deleteConsumer(); err != nil { - nc.mu.Lock() - if errCB := nc.Opts.AsyncErrorCB; errCB != nil { - nc.ach.push(func() { errCB(nc, sub, err) }) - } - nc.mu.Unlock() - } - } - return - } - - time.Sleep(100 * time.Millisecond) - } -} - -// AutoUnsubscribe will issue an automatic Unsubscribe that is -// processed by the server when max messages have been received. -// This can be useful when sending a request to an unknown number -// of subscribers. -func (s *Subscription) AutoUnsubscribe(max int) error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - conn := s.conn - closed := s.closed - s.mu.Unlock() - if conn == nil || closed { - return ErrBadSubscription - } - return conn.unsubscribe(s, max, false) -} - -// SetClosedHandler will set the closed handler for when a subscription -// is closed (either unsubscribed or drained). -func (s *Subscription) SetClosedHandler(handler func(subject string)) { - s.mu.Lock() - s.pDone = handler - s.mu.Unlock() -} - -// unsubscribe performs the low level unsubscribe to the server. 
-// Use Subscription.Unsubscribe() -func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error { - var maxStr string - if max > 0 { - sub.mu.Lock() - sub.max = uint64(max) - if sub.delivered < sub.max { - maxStr = strconv.Itoa(max) - } - sub.mu.Unlock() - } - - nc.mu.Lock() - // ok here, but defer is expensive - defer nc.mu.Unlock() - - if nc.isClosed() { - return ErrConnectionClosed - } - - nc.subsMu.RLock() - s := nc.subs[sub.sid] - nc.subsMu.RUnlock() - // Already unsubscribed - if s == nil { - return nil - } - - if maxStr == _EMPTY_ && !drainMode { - nc.removeSub(s) - } - - if drainMode { - go nc.checkDrained(sub) - } - - // We will send these for all subs when we reconnect - // so that we can suppress here. - if !nc.isReconnecting() { - nc.bw.appendString(fmt.Sprintf(unsubProto, s.sid, maxStr)) - nc.kickFlusher() - } - - // For JetStream subscriptions cancel the attached context if there is any. - var cancel func() - sub.mu.Lock() - jsi := sub.jsi - if jsi != nil { - cancel = jsi.cancel - jsi.cancel = nil - } - sub.mu.Unlock() - if cancel != nil { - cancel() - } - - return nil -} - -// NextMsg will return the next message available to a synchronous subscriber -// or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription), -// the connection is closed (ErrConnectionClosed), the timeout is reached (ErrTimeout), -// or if there were no responders (ErrNoResponders) when used in the context of a request/reply. -func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { - if s == nil { - return nil, ErrBadSubscription - } - - s.mu.Lock() - err := s.validateNextMsgState(false) - if err != nil { - s.mu.Unlock() - return nil, err - } - - // snapshot - mch := s.mch - s.mu.Unlock() - - var ok bool - var msg *Msg - - // If something is available right away, let's optimize that case. 
- select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } else { - return msg, nil - } - default: - } - - // If we are here a message was not immediately available, so lets loop - // with a timeout. - - t := globalTimerPool.Get(timeout) - defer globalTimerPool.Put(t) - - select { - case msg, ok = <-mch: - if !ok { - return nil, s.getNextMsgErr() - } - if err := s.processNextMsgDelivered(msg); err != nil { - return nil, err - } - case <-t.C: - return nil, ErrTimeout - } - - return msg, nil -} - -// validateNextMsgState checks whether the subscription is in a valid -// state to call NextMsg and be delivered another message synchronously. -// This should be called while holding the lock. -func (s *Subscription) validateNextMsgState(pullSubInternal bool) error { - if s.connClosed { - return ErrConnectionClosed - } - if s.mch == nil { - if s.max > 0 && s.delivered >= s.max { - return ErrMaxMessages - } else if s.closed { - return ErrBadSubscription - } - } - if s.mcb != nil { - return ErrSyncSubRequired - } - if s.sc { - s.sc = false - return ErrSlowConsumer - } - // Unless this is from an internal call, reject use of this API. - // Users should use Fetch() instead. - if !pullSubInternal && s.jsi != nil && s.jsi.pull { - return ErrTypeSubscription - } - return nil -} - -// This is called when the sync channel has been closed. -// The error returned will be either connection or subscription -// closed depending on what caused NextMsg() to fail. -func (s *Subscription) getNextMsgErr() error { - s.mu.Lock() - defer s.mu.Unlock() - if s.connClosed { - return ErrConnectionClosed - } - return ErrBadSubscription -} - -// processNextMsgDelivered takes a message and applies the needed -// accounting to the stats from the subscription, returning an -// error in case we have the maximum number of messages have been -// delivered already. 
It should not be called while holding the lock. -func (s *Subscription) processNextMsgDelivered(msg *Msg) error { - s.mu.Lock() - nc := s.conn - max := s.max - - var fcReply string - // Update some stats. - s.delivered++ - delivered := s.delivered - if s.jsi != nil { - fcReply = s.checkForFlowControlResponse() - } - - if s.typ == SyncSubscription { - s.pMsgs-- - s.pBytes -= len(msg.Data) - } - s.mu.Unlock() - - if fcReply != _EMPTY_ { - nc.Publish(fcReply, nil) - } - - if max > 0 { - if delivered > max { - return ErrMaxMessages - } - // Remove subscription if we have reached max. - if delivered == max { - nc.mu.Lock() - nc.removeSub(s) - nc.mu.Unlock() - } - } - if len(msg.Data) == 0 && msg.Header.Get(statusHdr) == noResponders { - return ErrNoResponders - } - - return nil -} - -// Queued returns the number of queued messages in the client for this subscription. -// DEPRECATED: Use Pending() -func (s *Subscription) QueuedMsgs() (int, error) { - m, _, err := s.Pending() - return int(m), err -} - -// Pending returns the number of queued messages and queued bytes in the client for this subscription. -func (s *Subscription) Pending() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgs, s.pBytes, nil -} - -// MaxPending returns the maximum number of queued messages and queued bytes seen so far. -func (s *Subscription) MaxPending() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgsMax, s.pBytesMax, nil -} - -// ClearMaxPending resets the maximums seen so far. 
-func (s *Subscription) ClearMaxPending() error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return ErrBadSubscription - } - if s.typ == ChanSubscription { - return ErrTypeSubscription - } - s.pMsgsMax, s.pBytesMax = 0, 0 - return nil -} - -// Pending Limits -const ( - // DefaultSubPendingMsgsLimit will be 512k msgs. - DefaultSubPendingMsgsLimit = 512 * 1024 - // DefaultSubPendingBytesLimit is 64MB - DefaultSubPendingBytesLimit = 64 * 1024 * 1024 -) - -// PendingLimits returns the current limits for this subscription. -// If no error is returned, a negative value indicates that the -// given metric is not limited. -func (s *Subscription) PendingLimits() (int, int, error) { - if s == nil { - return -1, -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, -1, ErrBadSubscription - } - if s.typ == ChanSubscription { - return -1, -1, ErrTypeSubscription - } - return s.pMsgsLimit, s.pBytesLimit, nil -} - -// SetPendingLimits sets the limits for pending msgs and bytes for this subscription. -// Zero is not allowed. Any negative value means that the given metric is not limited. -func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { - if s == nil { - return ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return ErrBadSubscription - } - if s.typ == ChanSubscription { - return ErrTypeSubscription - } - if msgLimit == 0 || bytesLimit == 0 { - return ErrInvalidArg - } - s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit - return nil -} - -// Delivered returns the number of delivered messages for this subscription. 
-func (s *Subscription) Delivered() (int64, error) { - if s == nil { - return -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, ErrBadSubscription - } - return int64(s.delivered), nil -} - -// Dropped returns the number of known dropped messages for this subscription. -// This will correspond to messages dropped by violations of PendingLimits. If -// the server declares the connection a SlowConsumer, this number may not be -// valid. -func (s *Subscription) Dropped() (int, error) { - if s == nil { - return -1, ErrBadSubscription - } - s.mu.Lock() - defer s.mu.Unlock() - if s.conn == nil || s.closed { - return -1, ErrBadSubscription - } - return s.dropped, nil -} - -// Respond allows a convenient way to respond to requests in service based subscriptions. -func (m *Msg) Respond(data []byte) error { - if m == nil || m.Sub == nil { - return ErrMsgNotBound - } - if m.Reply == "" { - return ErrMsgNoReply - } - m.Sub.mu.Lock() - nc := m.Sub.conn - m.Sub.mu.Unlock() - // No need to check the connection here since the call to publish will do all the checking. - return nc.Publish(m.Reply, data) -} - -// RespondMsg allows a convenient way to respond to requests in service based subscriptions that might include headers -func (m *Msg) RespondMsg(msg *Msg) error { - if m == nil || m.Sub == nil { - return ErrMsgNotBound - } - if m.Reply == "" { - return ErrMsgNoReply - } - msg.Subject = m.Reply - m.Sub.mu.Lock() - nc := m.Sub.conn - m.Sub.mu.Unlock() - // No need to check the connection here since the call to publish will do all the checking. - return nc.PublishMsg(msg) -} - -// FIXME: This is a hack -// removeFlushEntry is needed when we need to discard queued up responses -// for our pings as part of a flush call. This happens when we have a flush -// call outstanding and we call close. 
-func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.pongs == nil { - return false - } - for i, c := range nc.pongs { - if c == ch { - nc.pongs[i] = nil - return true - } - } - return false -} - -// The lock must be held entering this function. -func (nc *Conn) sendPing(ch chan struct{}) { - nc.pongs = append(nc.pongs, ch) - nc.bw.appendString(pingProto) - // Flush in place. - nc.bw.flush() -} - -// This will fire periodically and send a client origin -// ping to the server. Will also check that we have received -// responses from the server. -func (nc *Conn) processPingTimer() { - nc.mu.Lock() - - if nc.status != CONNECTED { - nc.mu.Unlock() - return - } - - // Check for violation - nc.pout++ - if nc.pout > nc.Opts.MaxPingsOut { - nc.mu.Unlock() - nc.processOpErr(ErrStaleConnection) - return - } - - nc.sendPing(nil) - nc.ptmr.Reset(nc.Opts.PingInterval) - nc.mu.Unlock() -} - -// FlushTimeout allows a Flush operation to have an associated timeout. -func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { - if nc == nil { - return ErrInvalidConnection - } - if timeout <= 0 { - return ErrBadTimeout - } - - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - t := globalTimerPool.Get(timeout) - defer globalTimerPool.Put(t) - - // Create a buffered channel to prevent chan send to block - // in processPong() if this code here times out just when - // PONG was received. - ch := make(chan struct{}, 1) - nc.sendPing(ch) - nc.mu.Unlock() - - select { - case _, ok := <-ch: - if !ok { - err = ErrConnectionClosed - } else { - close(ch) - } - case <-t.C: - err = ErrTimeout - } - - if err != nil { - nc.removeFlushEntry(ch) - } - return -} - -// RTT calculates the round trip time between this client and the server. 
-func (nc *Conn) RTT() (time.Duration, error) { - if nc.IsClosed() { - return 0, ErrConnectionClosed - } - if nc.IsReconnecting() { - return 0, ErrDisconnected - } - start := time.Now() - if err := nc.FlushTimeout(10 * time.Second); err != nil { - return 0, err - } - return time.Since(start), nil -} - -// Flush will perform a round trip to the server and return when it -// receives the internal reply. -func (nc *Conn) Flush() error { - return nc.FlushTimeout(10 * time.Second) -} - -// Buffered will return the number of bytes buffered to be sent to the server. -// FIXME(dlc) take into account disconnected state. -func (nc *Conn) Buffered() (int, error) { - nc.mu.RLock() - defer nc.mu.RUnlock() - if nc.isClosed() || nc.bw == nil { - return -1, ErrConnectionClosed - } - return nc.bw.buffered(), nil -} - -// resendSubscriptions will send our subscription state back to the -// server. Used in reconnects -func (nc *Conn) resendSubscriptions() { - // Since we are going to send protocols to the server, we don't want to - // be holding the subsMu lock (which is used in processMsg). So copy - // the subscriptions in a temporary array. - nc.subsMu.RLock() - subs := make([]*Subscription, 0, len(nc.subs)) - for _, s := range nc.subs { - subs = append(subs, s) - } - nc.subsMu.RUnlock() - for _, s := range subs { - adjustedMax := uint64(0) - s.mu.Lock() - if s.max > 0 { - if s.delivered < s.max { - adjustedMax = s.max - s.delivered - } - // adjustedMax could be 0 here if the number of delivered msgs - // reached the max, if so unsubscribe. 
- if adjustedMax == 0 { - s.mu.Unlock() - nc.bw.writeDirect(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) - continue - } - } - subj, queue, sid := s.Subject, s.Queue, s.sid - s.mu.Unlock() - - nc.bw.writeDirect(fmt.Sprintf(subProto, subj, queue, sid)) - if adjustedMax > 0 { - maxStr := strconv.Itoa(int(adjustedMax)) - nc.bw.writeDirect(fmt.Sprintf(unsubProto, sid, maxStr)) - } - } -} - -// This will clear any pending flush calls and release pending calls. -// Lock is assumed to be held by the caller. -func (nc *Conn) clearPendingFlushCalls() { - // Clear any queued pongs, e.g. pending flush calls. - for _, ch := range nc.pongs { - if ch != nil { - close(ch) - } - } - nc.pongs = nil -} - -// This will clear any pending Request calls. -// Lock is assumed to be held by the caller. -func (nc *Conn) clearPendingRequestCalls() { - if nc.respMap == nil { - return - } - for key, ch := range nc.respMap { - if ch != nil { - close(ch) - delete(nc.respMap, key) - } - } -} - -// Low level close call that will do correct cleanup and set -// desired status. Also controls whether user defined callbacks -// will be triggered. The lock should not be held entering this -// function. This function will handle the locking manually. -func (nc *Conn) close(status Status, doCBs bool, err error) { - nc.mu.Lock() - if nc.isClosed() { - nc.status = status - nc.mu.Unlock() - return - } - nc.status = CLOSED - - // Kick the Go routines so they fall out. - nc.kickFlusher() - - // If the reconnect timer is waiting between a reconnect attempt, - // this will kick it out. - if nc.rqch != nil { - close(nc.rqch) - nc.rqch = nil - } - - // Clear any queued pongs, e.g. pending flush calls. - nc.clearPendingFlushCalls() - - // Clear any queued and blocking Requests. - nc.clearPendingRequestCalls() - - // Stop ping timer if set. 
- nc.stopPingTimer() - nc.ptmr = nil - - // Need to close and set TCP conn to nil if reconnect loop has stopped, - // otherwise we would incorrectly invoke Disconnect handler (if set) - // down below. - if nc.ar && nc.conn != nil { - nc.conn.Close() - nc.conn = nil - } else if nc.conn != nil { - // Go ahead and make sure we have flushed the outbound - nc.bw.flush() - defer nc.conn.Close() - } - - // Close sync subscriber channels and release any - // pending NextMsg() calls. - nc.subsMu.Lock() - for _, s := range nc.subs { - s.mu.Lock() - - // Release callers on NextMsg for SyncSubscription only - if s.mch != nil && s.typ == SyncSubscription { - close(s.mch) - } - s.mch = nil - // Mark as invalid, for signaling to waitForMsgs - s.closed = true - // Mark connection closed in subscription - s.connClosed = true - // If we have an async subscription, signals it to exit - if s.typ == AsyncSubscription && s.pCond != nil { - s.pCond.Signal() - } - - s.mu.Unlock() - } - nc.subs = nil - nc.subsMu.Unlock() - - nc.changeConnStatus(status) - - // Perform appropriate callback if needed for a disconnect. - if doCBs { - if nc.conn != nil { - if disconnectedErrCB := nc.Opts.DisconnectedErrCB; disconnectedErrCB != nil { - nc.ach.push(func() { disconnectedErrCB(nc, err) }) - } else if disconnectedCB := nc.Opts.DisconnectedCB; disconnectedCB != nil { - nc.ach.push(func() { disconnectedCB(nc) }) - } - } - if nc.Opts.ClosedCB != nil { - nc.ach.push(func() { nc.Opts.ClosedCB(nc) }) - } - } - // If this is terminal, then we have to notify the asyncCB handler that - // it can exit once all async callbacks have been dispatched. - if status == CLOSED { - nc.ach.close() - } - nc.mu.Unlock() -} - -// Close will close the connection to the server. This call will release -// all blocking calls, such as Flush() and NextMsg() -func (nc *Conn) Close() { - if nc != nil { - // This will be a no-op if the connection was not websocket. 
- // We do this here as opposed to inside close() because we want - // to do this only for the final user-driven close of the client. - // Otherwise, we would need to change close() to pass a boolean - // indicating that this is the case. - nc.wsClose() - nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil) - } -} - -// IsClosed tests if a Conn has been closed. -func (nc *Conn) IsClosed() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isClosed() -} - -// IsReconnecting tests if a Conn is reconnecting. -func (nc *Conn) IsReconnecting() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isReconnecting() -} - -// IsConnected tests if a Conn is connected. -func (nc *Conn) IsConnected() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isConnected() -} - -// drainConnection will run in a separate Go routine and will -// flush all publishes and drain all active subscriptions. -func (nc *Conn) drainConnection() { - // Snapshot subs list. - nc.mu.Lock() - - // Check again here if we are in a state to not process. - if nc.isClosed() { - nc.mu.Unlock() - return - } - if nc.isConnecting() || nc.isReconnecting() { - nc.mu.Unlock() - // Move to closed state. - nc.Close() - return - } - - subs := make([]*Subscription, 0, len(nc.subs)) - for _, s := range nc.subs { - if s == nc.respMux { - // Skip since might be in use while messages - // are being processed (can miss responses). - continue - } - subs = append(subs, s) - } - errCB := nc.Opts.AsyncErrorCB - drainWait := nc.Opts.DrainTimeout - respMux := nc.respMux - nc.mu.Unlock() - - // for pushing errors with context. - pushErr := func(err error) { - nc.mu.Lock() - nc.err = err - if errCB != nil { - nc.ach.push(func() { errCB(nc, nil, err) }) - } - nc.mu.Unlock() - } - - // Do subs first, skip request handler if present. - for _, s := range subs { - if err := s.Drain(); err != nil { - // We will notify about these but continue. 
- pushErr(err) - } - } - - // Wait for the subscriptions to drop to zero. - timeout := time.Now().Add(drainWait) - var min int - if respMux != nil { - min = 1 - } else { - min = 0 - } - for time.Now().Before(timeout) { - if nc.NumSubscriptions() == min { - break - } - time.Sleep(10 * time.Millisecond) - } - - // In case there was a request/response handler - // then need to call drain at the end. - if respMux != nil { - if err := respMux.Drain(); err != nil { - // We will notify about these but continue. - pushErr(err) - } - for time.Now().Before(timeout) { - if nc.NumSubscriptions() == 0 { - break - } - time.Sleep(10 * time.Millisecond) - } - } - - // Check if we timed out. - if nc.NumSubscriptions() != 0 { - pushErr(ErrDrainTimeout) - } - - // Flip State - nc.mu.Lock() - nc.changeConnStatus(DRAINING_PUBS) - nc.mu.Unlock() - - // Do publish drain via Flush() call. - err := nc.FlushTimeout(5 * time.Second) - if err != nil { - pushErr(err) - } - - // Move to closed state. - nc.Close() -} - -// Drain will put a connection into a drain state. All subscriptions will -// immediately be put into a drain state. Upon completion, the publishers -// will be drained and can not publish any additional messages. Upon draining -// of the publishers, the connection will be closed. Use the ClosedCB() -// option to know when the connection has moved from draining to closed. -// -// See note in Subscription.Drain for JetStream subscriptions. -func (nc *Conn) Drain() error { - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - if nc.isConnecting() || nc.isReconnecting() { - nc.mu.Unlock() - nc.Close() - return ErrConnectionReconnecting - } - if nc.isDraining() { - nc.mu.Unlock() - return nil - } - nc.changeConnStatus(DRAINING_SUBS) - go nc.drainConnection() - nc.mu.Unlock() - - return nil -} - -// IsDraining tests if a Conn is in the draining state. 
-func (nc *Conn) IsDraining() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.isDraining() -} - -// caller must lock -func (nc *Conn) getServers(implicitOnly bool) []string { - poolSize := len(nc.srvPool) - var servers = make([]string, 0) - for i := 0; i < poolSize; i++ { - if implicitOnly && !nc.srvPool[i].isImplicit { - continue - } - url := nc.srvPool[i].url - servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) - } - return servers -} - -// Servers returns the list of known server urls, including additional -// servers discovered after a connection has been established. If -// authentication is enabled, use UserInfo or Token when connecting with -// these urls. -func (nc *Conn) Servers() []string { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.getServers(false) -} - -// DiscoveredServers returns only the server urls that have been discovered -// after a connection has been established. If authentication is enabled, -// use UserInfo or Token when connecting with these urls. -func (nc *Conn) DiscoveredServers() []string { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.getServers(true) -} - -// Status returns the current state of the connection. -func (nc *Conn) Status() Status { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.status -} - -// Test if Conn has been closed Lock is assumed held. -func (nc *Conn) isClosed() bool { - return nc.status == CLOSED -} - -// Test if Conn is in the process of connecting -func (nc *Conn) isConnecting() bool { - return nc.status == CONNECTING -} - -// Test if Conn is being reconnected. -func (nc *Conn) isReconnecting() bool { - return nc.status == RECONNECTING -} - -// Test if Conn is connected or connecting. -func (nc *Conn) isConnected() bool { - return nc.status == CONNECTED || nc.isDraining() -} - -// Test if Conn is in the draining state. 
-func (nc *Conn) isDraining() bool { - return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS -} - -// Test if Conn is in the draining state for pubs. -func (nc *Conn) isDrainingPubs() bool { - return nc.status == DRAINING_PUBS -} - -// Stats will return a race safe copy of the Statistics section for the connection. -func (nc *Conn) Stats() Statistics { - // Stats are updated either under connection's mu or with atomic operations - // for inbound stats in processMsg(). - nc.mu.Lock() - stats := Statistics{ - InMsgs: atomic.LoadUint64(&nc.InMsgs), - InBytes: atomic.LoadUint64(&nc.InBytes), - OutMsgs: nc.OutMsgs, - OutBytes: nc.OutBytes, - Reconnects: nc.Reconnects, - } - nc.mu.Unlock() - return stats -} - -// MaxPayload returns the size limit that a message payload can have. -// This is set by the server configuration and delivered to the client -// upon connect. -func (nc *Conn) MaxPayload() int64 { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.MaxPayload -} - -// HeadersSupported will return if the server supports headers -func (nc *Conn) HeadersSupported() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.Headers -} - -// AuthRequired will return if the connected server requires authorization. -func (nc *Conn) AuthRequired() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.AuthRequired -} - -// TLSRequired will return if the connected server requires TLS connections. -func (nc *Conn) TLSRequired() bool { - nc.mu.RLock() - defer nc.mu.RUnlock() - return nc.info.TLSRequired -} - -// Barrier schedules the given function `f` to all registered asynchronous -// subscriptions. -// Only the last subscription to see this barrier will invoke the function. -// If no subscription is registered at the time of this call, `f()` is invoked -// right away. -// ErrConnectionClosed is returned if the connection is closed prior to -// the call. 
-func (nc *Conn) Barrier(f func()) error { - nc.mu.Lock() - if nc.isClosed() { - nc.mu.Unlock() - return ErrConnectionClosed - } - nc.subsMu.Lock() - // Need to figure out how many non chan subscriptions there are - numSubs := 0 - for _, sub := range nc.subs { - if sub.typ == AsyncSubscription { - numSubs++ - } - } - if numSubs == 0 { - nc.subsMu.Unlock() - nc.mu.Unlock() - f() - return nil - } - barrier := &barrierInfo{refs: int64(numSubs), f: f} - for _, sub := range nc.subs { - sub.mu.Lock() - if sub.mch == nil { - msg := &Msg{barrier: barrier} - // Push onto the async pList - if sub.pTail != nil { - sub.pTail.next = msg - } else { - sub.pHead = msg - sub.pCond.Signal() - } - sub.pTail = msg - } - sub.mu.Unlock() - } - nc.subsMu.Unlock() - nc.mu.Unlock() - return nil -} - -// GetClientIP returns the client IP as known by the server. -// Supported as of server version 2.1.6. -func (nc *Conn) GetClientIP() (net.IP, error) { - nc.mu.RLock() - defer nc.mu.RUnlock() - if nc.isClosed() { - return nil, ErrConnectionClosed - } - if nc.info.ClientIP == "" { - return nil, ErrClientIPNotSupported - } - ip := net.ParseIP(nc.info.ClientIP) - return ip, nil -} - -// GetClientID returns the client ID assigned by the server to which -// the client is currently connected to. Note that the value may change if -// the client reconnects. -// This function returns ErrClientIDNotSupported if the server is of a -// version prior to 1.2.0. -func (nc *Conn) GetClientID() (uint64, error) { - nc.mu.RLock() - defer nc.mu.RUnlock() - if nc.isClosed() { - return 0, ErrConnectionClosed - } - if nc.info.CID == 0 { - return 0, ErrClientIDNotSupported - } - return nc.info.CID, nil -} - -// StatusChanged returns a channel on which given list of connection status changes will be reported. -// If no statuses are provided, defaults will be used: CONNECTED, RECONNECTING, DISCONNECTED, CLOSED. 
-func (nc *Conn) StatusChanged(statuses ...Status) chan Status { - if len(statuses) == 0 { - statuses = []Status{CONNECTED, RECONNECTING, DISCONNECTED, CLOSED} - } - ch := make(chan Status, 10) - for _, s := range statuses { - nc.registerStatusChangeListener(s, ch) - } - return ch -} - -// registerStatusChangeListener registers a channel waiting for a specific status change event. -// Status change events are non-blocking - if no receiver is waiting for the status change, -// it will not be sent on the channel. Closed channels are ignored. -func (nc *Conn) registerStatusChangeListener(status Status, ch chan Status) { - nc.mu.Lock() - defer nc.mu.Unlock() - if nc.statListeners == nil { - nc.statListeners = make(map[Status][]chan Status) - } - if _, ok := nc.statListeners[status]; !ok { - nc.statListeners[status] = make([]chan Status, 0) - } - nc.statListeners[status] = append(nc.statListeners[status], ch) -} - -// sendStatusEvent sends connection status event to all channels. -// If channel is closed, or there is no listener, sendStatusEvent -// will not block. Lock should be held entering. -func (nc *Conn) sendStatusEvent(s Status) { -Loop: - for i := 0; i < len(nc.statListeners[s]); i++ { - // make sure channel is not closed - select { - case <-nc.statListeners[s][i]: - // if chan is closed, remove it - nc.statListeners[s][i] = nc.statListeners[s][len(nc.statListeners[s])-1] - nc.statListeners[s] = nc.statListeners[s][:len(nc.statListeners[s])-1] - i-- - continue Loop - default: - } - // only send event if someone's listening - select { - case nc.statListeners[s][i] <- s: - default: - } - } -} - -// changeConnStatus changes connections status and sends events -// to all listeners. Lock should be held entering. -func (nc *Conn) changeConnStatus(status Status) { - if nc == nil { - return - } - nc.sendStatusEvent(status) - nc.status = status -} - -// NkeyOptionFromSeed will load an nkey pair from a seed file. 
-// It will return the NKey Option and will handle -// signing of nonce challenges from the server. It will take -// care to not hold keys in memory and to wipe memory. -func NkeyOptionFromSeed(seedFile string) (Option, error) { - kp, err := nkeyPairFromSeedFile(seedFile) - if err != nil { - return nil, err - } - // Wipe our key on exit. - defer kp.Wipe() - - pub, err := kp.PublicKey() - if err != nil { - return nil, err - } - if !nkeys.IsValidPublicUserKey(pub) { - return nil, fmt.Errorf("nats: Not a valid nkey user seed") - } - sigCB := func(nonce []byte) ([]byte, error) { - return sigHandler(nonce, seedFile) - } - return Nkey(string(pub), sigCB), nil -} - -// Just wipe slice with 'x', for clearing contents of creds or nkey seed file. -func wipeSlice(buf []byte) { - for i := range buf { - buf[i] = 'x' - } -} - -func userFromFile(userFile string) (string, error) { - path, err := expandPath(userFile) - if err != nil { - return _EMPTY_, fmt.Errorf("nats: %w", err) - } - - contents, err := os.ReadFile(path) - if err != nil { - return _EMPTY_, fmt.Errorf("nats: %w", err) - } - defer wipeSlice(contents) - return nkeys.ParseDecoratedJWT(contents) -} - -func homeDir() (string, error) { - if runtime.GOOS == "windows" { - homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH") - userProfile := os.Getenv("USERPROFILE") - - var home string - if homeDrive == "" || homePath == "" { - if userProfile == "" { - return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%") - } - home = userProfile - } else { - home = filepath.Join(homeDrive, homePath) - } - - return home, nil - } - - home := os.Getenv("HOME") - if home == "" { - return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME") - } - return home, nil -} - -func expandPath(p string) (string, error) { - p = os.ExpandEnv(p) - - if !strings.HasPrefix(p, "~") { - return p, nil - } - - home, err := homeDir() - if err != nil { - return _EMPTY_, err - } 
- - return filepath.Join(home, p[1:]), nil -} - -func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) { - contents, err := os.ReadFile(seedFile) - if err != nil { - return nil, fmt.Errorf("nats: %w", err) - } - defer wipeSlice(contents) - return nkeys.ParseDecoratedNKey(contents) -} - -// Sign authentication challenges from the server. -// Do not keep private seed in memory. -func sigHandler(nonce []byte, seedFile string) ([]byte, error) { - kp, err := nkeyPairFromSeedFile(seedFile) - if err != nil { - return nil, fmt.Errorf("unable to extract key pair from file %q: %w", seedFile, err) - } - // Wipe our key on exit. - defer kp.Wipe() - - sig, _ := kp.Sign(nonce) - return sig, nil -} - -type timeoutWriter struct { - timeout time.Duration - conn net.Conn - err error -} - -// Write implements the io.Writer interface. -func (tw *timeoutWriter) Write(p []byte) (int, error) { - if tw.err != nil { - return 0, tw.err - } - - var n int - tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout)) - n, tw.err = tw.conn.Write(p) - tw.conn.SetWriteDeadline(time.Time{}) - return n, tw.err -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/netchan.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/netchan.go deleted file mode 100644 index 6b13690..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/netchan.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2013-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "errors" - "reflect" -) - -// This allows the functionality for network channels by binding send and receive Go chans -// to subjects and optionally queue groups. -// Data will be encoded and decoded via the EncodedConn and its associated encoders. - -// BindSendChan binds a channel for send operations to NATS. -func (c *EncodedConn) BindSendChan(subject string, channel any) error { - chVal := reflect.ValueOf(channel) - if chVal.Kind() != reflect.Chan { - return ErrChanArg - } - go chPublish(c, chVal, subject) - return nil -} - -// Publish all values that arrive on the channel until it is closed or we -// encounter an error. -func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { - for { - val, ok := chVal.Recv() - if !ok { - // Channel has most likely been closed. - return - } - if e := c.Publish(subject, val.Interface()); e != nil { - // Do this under lock. - c.Conn.mu.Lock() - defer c.Conn.mu.Unlock() - - if c.Conn.Opts.AsyncErrorCB != nil { - // FIXME(dlc) - Not sure this is the right thing to do. - // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback - if c.Conn.isClosed() { - go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) - } else { - c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) }) - } - } - return - } - } -} - -// BindRecvChan binds a channel for receive operations from NATS. -func (c *EncodedConn) BindRecvChan(subject string, channel any) (*Subscription, error) { - return c.bindRecvChan(subject, _EMPTY_, channel) -} - -// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. -func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel any) (*Subscription, error) { - return c.bindRecvChan(subject, queue, channel) -} - -// Internal function to bind receive operations for a channel. 
-func (c *EncodedConn) bindRecvChan(subject, queue string, channel any) (*Subscription, error) { - chVal := reflect.ValueOf(channel) - if chVal.Kind() != reflect.Chan { - return nil, ErrChanArg - } - argType := chVal.Type().Elem() - - cb := func(m *Msg) { - var oPtr reflect.Value - if argType.Kind() != reflect.Ptr { - oPtr = reflect.New(argType) - } else { - oPtr = reflect.New(argType.Elem()) - } - if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { - c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error()) - if c.Conn.Opts.AsyncErrorCB != nil { - c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) }) - } - return - } - if argType.Kind() != reflect.Ptr { - oPtr = reflect.Indirect(oPtr) - } - // This is a bit hacky, but in this instance we may be trying to send to a closed channel. - // and the user does not know when it is safe to close the channel. - defer func() { - // If we have panicked, recover and close the subscription. - if r := recover(); r != nil { - m.Sub.Unsubscribe() - } - }() - // Actually do the send to the channel. - chVal.Send(oPtr) - } - - return c.Conn.subscribe(subject, queue, cb, nil, false, nil) -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/object.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/object.go deleted file mode 100644 index f3f5d97..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/object.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2021-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "hash" - "io" - "net" - "os" - "strings" - "sync" - "time" - - "github.com/nats-io/nats.go/internal/parser" - "github.com/nats-io/nuid" -) - -// ObjectStoreManager creates, loads and deletes Object Stores -type ObjectStoreManager interface { - // ObjectStore will look up and bind to an existing object store instance. - ObjectStore(bucket string) (ObjectStore, error) - // CreateObjectStore will create an object store. - CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) - // DeleteObjectStore will delete the underlying stream for the named object. - DeleteObjectStore(bucket string) error - // ObjectStoreNames is used to retrieve a list of bucket names - ObjectStoreNames(opts ...ObjectOpt) <-chan string - // ObjectStores is used to retrieve a list of bucket statuses - ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus -} - -// ObjectStore is a blob store capable of storing large objects efficiently in -// JetStream streams -type ObjectStore interface { - // Put will place the contents from the reader into a new object. - Put(obj *ObjectMeta, reader io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) - // Get will pull the named object from the object store. - Get(name string, opts ...GetObjectOpt) (ObjectResult, error) - - // PutBytes is convenience function to put a byte slice into this object store. 
- PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) - // GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. - GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) - - // PutString is convenience function to put a string into this object store. - PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) - // GetString is a convenience function to pull an object from this object store and return it as a string. - GetString(name string, opts ...GetObjectOpt) (string, error) - - // PutFile is convenience function to put a file into this object store. - PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) - // GetFile is a convenience function to pull an object from this object store and place it in a file. - GetFile(name, file string, opts ...GetObjectOpt) error - - // GetInfo will retrieve the current information for the object. - GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) - // UpdateMeta will update the metadata for the object. - UpdateMeta(name string, meta *ObjectMeta) error - - // Delete will delete the named object. - Delete(name string) error - - // AddLink will add a link to another object. - AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) - - // AddBucketLink will add a link to another object store. - AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) - - // Seal will seal the object store, no further modifications will be allowed. - Seal() error - - // Watch for changes in the underlying store and receive meta information updates. - Watch(opts ...WatchOpt) (ObjectWatcher, error) - - // List will list all the objects in this store. - List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) - - // Status retrieves run-time status about the backing store of the bucket. 
- Status() (ObjectStoreStatus, error) -} - -type ObjectOpt interface { - configureObject(opts *objOpts) error -} - -type objOpts struct { - ctx context.Context -} - -// For nats.Context() support. -func (ctx ContextOpt) configureObject(opts *objOpts) error { - opts.ctx = ctx - return nil -} - -// ObjectWatcher is what is returned when doing a watch. -type ObjectWatcher interface { - // Updates returns a channel to read any updates to entries. - Updates() <-chan *ObjectInfo - // Stop will stop this watcher. - Stop() error -} - -var ( - ErrObjectConfigRequired = errors.New("nats: object-store config required") - ErrBadObjectMeta = errors.New("nats: object-store meta information invalid") - ErrObjectNotFound = errors.New("nats: object not found") - ErrInvalidStoreName = errors.New("nats: invalid object-store name") - ErrDigestMismatch = errors.New("nats: received a corrupt object, digests do not match") - ErrInvalidDigestFormat = errors.New("nats: object digest hash has invalid format") - ErrNoObjectsFound = errors.New("nats: no objects found") - ErrObjectAlreadyExists = errors.New("nats: an object already exists with that name") - ErrNameRequired = errors.New("nats: name is required") - ErrNeeds262 = errors.New("nats: object-store requires at least server version 2.6.2") - ErrLinkNotAllowed = errors.New("nats: link cannot be set when putting the object in bucket") - ErrObjectRequired = errors.New("nats: object required") - ErrNoLinkToDeleted = errors.New("nats: not allowed to link to a deleted object") - ErrNoLinkToLink = errors.New("nats: not allowed to link to another link") - ErrCantGetBucket = errors.New("nats: invalid Get, object is a link to a bucket") - ErrBucketRequired = errors.New("nats: bucket required") - ErrBucketMalformed = errors.New("nats: bucket malformed") - ErrUpdateMetaDeleted = errors.New("nats: cannot update meta for a deleted object") -) - -// ObjectStoreConfig is the config for the object store. 
-type ObjectStoreConfig struct { - Bucket string `json:"bucket"` - Description string `json:"description,omitempty"` - TTL time.Duration `json:"max_age,omitempty"` - MaxBytes int64 `json:"max_bytes,omitempty"` - Storage StorageType `json:"storage,omitempty"` - Replicas int `json:"num_replicas,omitempty"` - Placement *Placement `json:"placement,omitempty"` - - // Bucket-specific metadata - // NOTE: Metadata requires nats-server v2.10.0+ - Metadata map[string]string `json:"metadata,omitempty"` - // Enable underlying stream compression. - // NOTE: Compression is supported for nats-server 2.10.0+ - Compression bool -} - -type ObjectStoreStatus interface { - // Bucket is the name of the bucket - Bucket() string - // Description is the description supplied when creating the bucket - Description() string - // TTL indicates how long objects are kept in the bucket - TTL() time.Duration - // Storage indicates the underlying JetStream storage technology used to store data - Storage() StorageType - // Replicas indicates how many storage replicas are kept for the data in the bucket - Replicas() int - // Sealed indicates the stream is sealed and cannot be modified in any way - Sealed() bool - // Size is the combined size of all data in the bucket including metadata, in bytes - Size() uint64 - // BackingStore provides details about the underlying storage - BackingStore() string - // Metadata is the user supplied metadata for the bucket - Metadata() map[string]string - // IsCompressed indicates if the data is compressed on disk - IsCompressed() bool -} - -// ObjectMetaOptions -type ObjectMetaOptions struct { - Link *ObjectLink `json:"link,omitempty"` - ChunkSize uint32 `json:"max_chunk_size,omitempty"` -} - -// ObjectMeta is high level information about an object. 
-type ObjectMeta struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - Headers Header `json:"headers,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` - - // Optional options. - Opts *ObjectMetaOptions `json:"options,omitempty"` -} - -// ObjectInfo is meta plus instance information. -type ObjectInfo struct { - ObjectMeta - Bucket string `json:"bucket"` - NUID string `json:"nuid"` - Size uint64 `json:"size"` - ModTime time.Time `json:"mtime"` - Chunks uint32 `json:"chunks"` - Digest string `json:"digest,omitempty"` - Deleted bool `json:"deleted,omitempty"` -} - -// ObjectLink is used to embed links to other buckets and objects. -type ObjectLink struct { - // Bucket is the name of the other object store. - Bucket string `json:"bucket"` - // Name can be used to link to a single object. - // If empty means this is a link to the whole store, like a directory. - Name string `json:"name,omitempty"` -} - -// ObjectResult will return the underlying stream info and also be an io.ReadCloser. -type ObjectResult interface { - io.ReadCloser - Info() (*ObjectInfo, error) - Error() error -} - -const ( - objNameTmpl = "OBJ_%s" // OBJ_ // stream name - objAllChunksPreTmpl = "$O.%s.C.>" // $O..C.> // chunk stream subject - objAllMetaPreTmpl = "$O.%s.M.>" // $O..M.> // meta stream subject - objChunksPreTmpl = "$O.%s.C.%s" // $O..C. // chunk message subject - objMetaPreTmpl = "$O.%s.M.%s" // $O..M. // meta message subject - objNoPending = "0" - objDefaultChunkSize = uint32(128 * 1024) // 128k - objDigestType = "SHA-256=" - objDigestTmpl = objDigestType + "%s" -) - -type obs struct { - name string - stream string - js *js -} - -// CreateObjectStore will create an object store. 
-func (js *js) CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) { - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, ErrNeeds262 - } - if cfg == nil { - return nil, ErrObjectConfigRequired - } - if !validBucketRe.MatchString(cfg.Bucket) { - return nil, ErrInvalidStoreName - } - - name := cfg.Bucket - chunks := fmt.Sprintf(objAllChunksPreTmpl, name) - meta := fmt.Sprintf(objAllMetaPreTmpl, name) - - // We will set explicitly some values so that we can do comparison - // if we get an "already in use" error and need to check if it is same. - // See kv - replicas := cfg.Replicas - if replicas == 0 { - replicas = 1 - } - maxBytes := cfg.MaxBytes - if maxBytes == 0 { - maxBytes = -1 - } - var compression StoreCompression - if cfg.Compression { - compression = S2Compression - } - scfg := &StreamConfig{ - Name: fmt.Sprintf(objNameTmpl, name), - Description: cfg.Description, - Subjects: []string{chunks, meta}, - MaxAge: cfg.TTL, - MaxBytes: maxBytes, - Storage: cfg.Storage, - Replicas: replicas, - Placement: cfg.Placement, - Discard: DiscardNew, - AllowRollup: true, - AllowDirect: true, - Metadata: cfg.Metadata, - Compression: compression, - } - - // Create our stream. - _, err := js.AddStream(scfg) - if err != nil { - return nil, err - } - - return &obs{name: name, stream: scfg.Name, js: js}, nil -} - -// ObjectStore will look up and bind to an existing object store instance. -func (js *js) ObjectStore(bucket string) (ObjectStore, error) { - if !validBucketRe.MatchString(bucket) { - return nil, ErrInvalidStoreName - } - if !js.nc.serverMinVersion(2, 6, 2) { - return nil, ErrNeeds262 - } - - stream := fmt.Sprintf(objNameTmpl, bucket) - si, err := js.StreamInfo(stream) - if err != nil { - return nil, err - } - return &obs{name: bucket, stream: si.Config.Name, js: js}, nil -} - -// DeleteObjectStore will delete the underlying stream for the named object. 
-func (js *js) DeleteObjectStore(bucket string) error { - stream := fmt.Sprintf(objNameTmpl, bucket) - return js.DeleteStream(stream) -} - -func encodeName(name string) string { - return base64.URLEncoding.EncodeToString([]byte(name)) -} - -// Put will place the contents from the reader into this object-store. -func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) { - if meta == nil || meta.Name == "" { - return nil, ErrBadObjectMeta - } - - if meta.Opts == nil { - meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize} - } else if meta.Opts.Link != nil { - return nil, ErrLinkNotAllowed - } else if meta.Opts.ChunkSize == 0 { - meta.Opts.ChunkSize = objDefaultChunkSize - } - - var o objOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureObject(&o); err != nil { - return nil, err - } - } - } - ctx := o.ctx - - // Create the new nuid so chunks go on a new subject if the name is re-used - newnuid := nuid.Next() - - // These will be used in more than one place - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid) - - // Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem - // Chunks on the old nuid can be cleaned up at the end - einfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name - if err != nil && err != ErrObjectNotFound { - return nil, err - } - - // For async error handling - var perr error - var mu sync.Mutex - setErr := func(err error) { - mu.Lock() - defer mu.Unlock() - perr = err - } - getErr := func() error { - mu.Lock() - defer mu.Unlock() - return perr - } - - // Create our own JS context to handle errors etc. 
- jetStream, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) { setErr(err) })) - if err != nil { - return nil, err - } - - defer jetStream.(*js).cleanupReplySub() - - purgePartial := func() error { - // wait until all pubs are complete or up to default timeout before attempting purge - select { - case <-jetStream.PublishAsyncComplete(): - case <-time.After(obs.js.opts.wait): - } - if err := obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}); err != nil { - return fmt.Errorf("could not cleanup bucket after erroneous put operation: %w", err) - } - return nil - } - - m, h := NewMsg(chunkSubj), sha256.New() - chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0) - - // set up the info object. The chunk upload sets the size and digest - info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: *meta} - - for r != nil { - if ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - err = ctx.Err() - } else { - err = ErrTimeout - } - default: - } - if err != nil { - if purgeErr := purgePartial(); purgeErr != nil { - return nil, errors.Join(err, purgeErr) - } - return nil, err - } - } - - // Actual read. - // TODO(dlc) - Deadline? - n, readErr := r.Read(chunk) - - // Handle all non EOF errors - if readErr != nil && readErr != io.EOF { - if purgeErr := purgePartial(); purgeErr != nil { - return nil, errors.Join(readErr, purgeErr) - } - return nil, readErr - } - - // Add chunk only if we received data - if n > 0 { - // Chunk processing. - m.Data = chunk[:n] - h.Write(m.Data) - - // Send msg itself. - if _, err := jetStream.PublishMsgAsync(m); err != nil { - if purgeErr := purgePartial(); purgeErr != nil { - return nil, errors.Join(err, purgeErr) - } - return nil, err - } - if err := getErr(); err != nil { - if purgeErr := purgePartial(); purgeErr != nil { - return nil, errors.Join(err, purgeErr) - } - return nil, err - } - // Update totals. 
- sent++ - total += uint64(n) - } - - // EOF Processing. - if readErr == io.EOF { - // Place meta info. - info.Size, info.Chunks = uint64(total), uint32(sent) - info.Digest = GetObjectDigestValue(h) - break - } - } - - // Prepare the meta message - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name)) - mm := NewMsg(metaSubj) - mm.Header.Set(MsgRollup, MsgRollupSubject) - mm.Data, err = json.Marshal(info) - if err != nil { - if r != nil { - if purgeErr := purgePartial(); purgeErr != nil { - return nil, errors.Join(err, purgeErr) - } - } - return nil, err - } - - // Publish the meta message. - _, err = jetStream.PublishMsgAsync(mm) - if err != nil { - if r != nil { - if purgeErr := purgePartial(); purgeErr != nil { - return nil, errors.Join(err, purgeErr) - } - } - return nil, err - } - - // Wait for all to be processed. - select { - case <-jetStream.PublishAsyncComplete(): - if err := getErr(); err != nil { - if r != nil { - if purgeErr := purgePartial(); purgeErr != nil { - return nil, errors.Join(err, purgeErr) - } - } - return nil, err - } - case <-time.After(obs.js.opts.wait): - return nil, ErrTimeout - } - - info.ModTime = time.Now().UTC() // This time is not actually the correct time - - // Delete any original chunks. - if einfo != nil && !einfo.Deleted { - echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID) - if err := obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: echunkSubj}); err != nil { - return info, err - } - } - - // TODO would it be okay to do this to return the info with the correct time? - // With the understanding that it is an extra call to the server. - // Otherwise the time the user gets back is the client time, not the server time. 
- // return obs.GetInfo(info.Name) - - return info, nil -} - -// GetObjectDigestValue calculates the base64 value of hashed data -func GetObjectDigestValue(data hash.Hash) string { - sha := data.Sum(nil) - return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:])) -} - -// DecodeObjectDigest decodes base64 hash -func DecodeObjectDigest(data string) ([]byte, error) { - digest := strings.SplitN(data, "=", 2) - if len(digest) != 2 { - return nil, ErrInvalidDigestFormat - } - return base64.URLEncoding.DecodeString(digest[1]) -} - -// ObjectResult impl. -type objResult struct { - sync.Mutex - info *ObjectInfo - r io.ReadCloser - err error - ctx context.Context - digest hash.Hash - readTimeout time.Duration -} - -func (info *ObjectInfo) isLink() bool { - return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil -} - -type GetObjectOpt interface { - configureGetObject(opts *getObjectOpts) error -} -type getObjectOpts struct { - ctx context.Context - // Include deleted object in the result. - showDeleted bool -} - -type getObjectFn func(opts *getObjectOpts) error - -func (opt getObjectFn) configureGetObject(opts *getObjectOpts) error { - return opt(opts) -} - -// GetObjectShowDeleted makes Get() return object if it was marked as deleted. -func GetObjectShowDeleted() GetObjectOpt { - return getObjectFn(func(opts *getObjectOpts) error { - opts.showDeleted = true - return nil - }) -} - -// For nats.Context() support. -func (ctx ContextOpt) configureGetObject(opts *getObjectOpts) error { - opts.ctx = ctx - return nil -} - -// Get will pull the object from the underlying stream. 
-func (obs *obs) Get(name string, opts ...GetObjectOpt) (ObjectResult, error) { - var o getObjectOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureGetObject(&o); err != nil { - return nil, err - } - } - } - ctx := o.ctx - infoOpts := make([]GetObjectInfoOpt, 0) - if ctx != nil { - infoOpts = append(infoOpts, Context(ctx)) - } - if o.showDeleted { - infoOpts = append(infoOpts, GetObjectInfoShowDeleted()) - } - - // Grab meta info. - info, err := obs.GetInfo(name, infoOpts...) - if err != nil { - return nil, err - } - if info.NUID == _EMPTY_ { - return nil, ErrBadObjectMeta - } - - // Check for object links. If single objects we do a pass through. - if info.isLink() { - if info.ObjectMeta.Opts.Link.Name == _EMPTY_ { - return nil, ErrCantGetBucket - } - - // is the link in the same bucket? - lbuck := info.ObjectMeta.Opts.Link.Bucket - if lbuck == obs.name { - return obs.Get(info.ObjectMeta.Opts.Link.Name) - } - - // different bucket - lobs, err := obs.js.ObjectStore(lbuck) - if err != nil { - return nil, err - } - return lobs.Get(info.ObjectMeta.Opts.Link.Name) - } - - result := &objResult{info: info, ctx: ctx, readTimeout: obs.js.opts.wait} - if info.Size == 0 { - return result, nil - } - - pr, pw := net.Pipe() - result.r = pr - - gotErr := func(m *Msg, err error) { - pw.Close() - m.Sub.Unsubscribe() - result.setErr(err) - } - - // For calculating sum256 - result.digest = sha256.New() - - processChunk := func(m *Msg) { - var err error - if ctx != nil { - select { - case <-ctx.Done(): - if errors.Is(ctx.Err(), context.Canceled) { - err = ctx.Err() - } else { - err = ErrTimeout - } - default: - } - if err != nil { - gotErr(m, err) - return - } - } - - tokens, err := parser.GetMetadataFields(m.Reply) - if err != nil { - gotErr(m, err) - return - } - - // Write to our pipe. 
- for b := m.Data; len(b) > 0; { - n, err := pw.Write(b) - if err != nil { - gotErr(m, err) - return - } - b = b[n:] - } - // Update sha256 - result.digest.Write(m.Data) - - // Check if we are done. - if tokens[parser.AckNumPendingTokenPos] == objNoPending { - pw.Close() - m.Sub.Unsubscribe() - } - } - - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) - _, err = obs.js.Subscribe(chunkSubj, processChunk, OrderedConsumer()) - if err != nil { - return nil, err - } - - return result, nil -} - -// Delete will delete the object. -func (obs *obs) Delete(name string) error { - // Grab meta info. - info, err := obs.GetInfo(name, GetObjectInfoShowDeleted()) - if err != nil { - return err - } - if info.NUID == _EMPTY_ { - return ErrBadObjectMeta - } - - // Place a rollup delete marker and publish the info - info.Deleted = true - info.Size, info.Chunks, info.Digest = 0, 0, _EMPTY_ - - if err = publishMeta(info, obs.js); err != nil { - return err - } - - // Purge chunks for the object. - chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) - return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}) -} - -func publishMeta(info *ObjectInfo, js JetStreamContext) error { - // marshal the object into json, don't store an actual time - info.ModTime = time.Time{} - data, err := json.Marshal(info) - if err != nil { - return err - } - - // Prepare and publish the message. - mm := NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name))) - mm.Header.Set(MsgRollup, MsgRollupSubject) - mm.Data = data - if _, err := js.PublishMsg(mm); err != nil { - return err - } - - // set the ModTime in case it's returned to the user, even though it's not the correct time. 
- info.ModTime = time.Now().UTC() - return nil -} - -// AddLink will add a link to another object if it's not deleted and not another link -// name is the name of this link object -// obj is what is being linked too -func (obs *obs) AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) { - if name == "" { - return nil, ErrNameRequired - } - - // TODO Handle stale info - - if obj == nil || obj.Name == "" { - return nil, ErrObjectRequired - } - if obj.Deleted { - return nil, ErrNoLinkToDeleted - } - if obj.isLink() { - return nil, ErrNoLinkToLink - } - - // If object with link's name is found, error. - // If link with link's name is found, that's okay to overwrite. - // If there was an error that was not ErrObjectNotFound, error. - einfo, err := obs.GetInfo(name, GetObjectInfoShowDeleted()) - if einfo != nil { - if !einfo.isLink() { - return nil, ErrObjectAlreadyExists - } - } else if err != ErrObjectNotFound { - return nil, err - } - - // create the meta for the link - meta := &ObjectMeta{ - Name: name, - Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}}, - } - info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta} - - // put the link object - if err = publishMeta(info, obs.js); err != nil { - return nil, err - } - - return info, nil -} - -// AddBucketLink will add a link to another object store. -func (ob *obs) AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) { - if name == "" { - return nil, ErrNameRequired - } - if bucket == nil { - return nil, ErrBucketRequired - } - bos, ok := bucket.(*obs) - if !ok { - return nil, ErrBucketMalformed - } - - // If object with link's name is found, error. - // If link with link's name is found, that's okay to overwrite. - // If there was an error that was not ErrObjectNotFound, error. 
- einfo, err := ob.GetInfo(name, GetObjectInfoShowDeleted()) - if einfo != nil { - if !einfo.isLink() { - return nil, ErrObjectAlreadyExists - } - } else if err != ErrObjectNotFound { - return nil, err - } - - // create the meta for the link - meta := &ObjectMeta{ - Name: name, - Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}}, - } - info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta} - - // put the link object - err = publishMeta(info, ob.js) - if err != nil { - return nil, err - } - - return info, nil -} - -// PutBytes is convenience function to put a byte slice into this object store. -func (obs *obs) PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) { - return obs.Put(&ObjectMeta{Name: name}, bytes.NewReader(data), opts...) -} - -// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. -func (obs *obs) GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) { - result, err := obs.Get(name, opts...) - if err != nil { - return nil, err - } - defer result.Close() - - var b bytes.Buffer - if _, err := b.ReadFrom(result); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// PutString is convenience function to put a string into this object store. -func (obs *obs) PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) { - return obs.Put(&ObjectMeta{Name: name}, strings.NewReader(data), opts...) -} - -// GetString is a convenience function to pull an object from this object store and return it as a string. -func (obs *obs) GetString(name string, opts ...GetObjectOpt) (string, error) { - result, err := obs.Get(name, opts...) - if err != nil { - return _EMPTY_, err - } - defer result.Close() - - var b bytes.Buffer - if _, err := b.ReadFrom(result); err != nil { - return _EMPTY_, err - } - return b.String(), nil -} - -// PutFile is convenience function to put a file into an object store. 
-func (obs *obs) PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - return obs.Put(&ObjectMeta{Name: file}, f, opts...) -} - -// GetFile is a convenience function to pull and object and place in a file. -func (obs *obs) GetFile(name, file string, opts ...GetObjectOpt) error { - // Expect file to be new. - f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600) - if err != nil { - return err - } - defer f.Close() - - result, err := obs.Get(name, opts...) - if err != nil { - os.Remove(f.Name()) - return err - } - defer result.Close() - - // Stream copy to the file. - _, err = io.Copy(f, result) - return err -} - -type GetObjectInfoOpt interface { - configureGetInfo(opts *getObjectInfoOpts) error -} -type getObjectInfoOpts struct { - ctx context.Context - // Include deleted object in the result. - showDeleted bool -} - -type getObjectInfoFn func(opts *getObjectInfoOpts) error - -func (opt getObjectInfoFn) configureGetInfo(opts *getObjectInfoOpts) error { - return opt(opts) -} - -// GetObjectInfoShowDeleted makes GetInfo() return object if it was marked as deleted. -func GetObjectInfoShowDeleted() GetObjectInfoOpt { - return getObjectInfoFn(func(opts *getObjectInfoOpts) error { - opts.showDeleted = true - return nil - }) -} - -// For nats.Context() support. -func (ctx ContextOpt) configureGetInfo(opts *getObjectInfoOpts) error { - opts.ctx = ctx - return nil -} - -// GetInfo will retrieve the current information for the object. -func (obs *obs) GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) { - // Grab last meta value we have. 
- if name == "" { - return nil, ErrNameRequired - } - var o getObjectInfoOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureGetInfo(&o); err != nil { - return nil, err - } - } - } - - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call - stream := fmt.Sprintf(objNameTmpl, obs.name) - - m, err := obs.js.GetLastMsg(stream, metaSubj) - if err != nil { - if errors.Is(err, ErrMsgNotFound) { - err = ErrObjectNotFound - } - return nil, err - } - var info ObjectInfo - if err := json.Unmarshal(m.Data, &info); err != nil { - return nil, ErrBadObjectMeta - } - if !o.showDeleted && info.Deleted { - return nil, ErrObjectNotFound - } - info.ModTime = m.Time - return &info, nil -} - -// UpdateMeta will update the meta for the object. -func (obs *obs) UpdateMeta(name string, meta *ObjectMeta) error { - if meta == nil { - return ErrBadObjectMeta - } - - // Grab the current meta. - info, err := obs.GetInfo(name) - if err != nil { - if errors.Is(err, ErrObjectNotFound) { - return ErrUpdateMetaDeleted - } - return err - } - - // If the new name is different from the old, and it exists, error - // If there was an error that was not ErrObjectNotFound, error. - if name != meta.Name { - existingInfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) - if err != nil && !errors.Is(err, ErrObjectNotFound) { - return err - } - if err == nil && !existingInfo.Deleted { - return ErrObjectAlreadyExists - } - } - - // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize) - // These should only be updated internally when appropriate. - info.Name = meta.Name - info.Description = meta.Description - info.Headers = meta.Headers - info.Metadata = meta.Metadata - - // Prepare the meta message - if err = publishMeta(info, obs.js); err != nil { - return err - } - - // did the name of this object change? 
We just stored the meta under the new name - // so delete the meta from the old name via purge stream for subject - if name != meta.Name { - metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) - return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: metaSubj}) - } - - return nil -} - -// Seal will seal the object store, no further modifications will be allowed. -func (obs *obs) Seal() error { - stream := fmt.Sprintf(objNameTmpl, obs.name) - si, err := obs.js.StreamInfo(stream) - if err != nil { - return err - } - // Seal the stream from being able to take on more messages. - cfg := si.Config - cfg.Sealed = true - _, err = obs.js.UpdateStream(&cfg) - return err -} - -// Implementation for Watch -type objWatcher struct { - updates chan *ObjectInfo - sub *Subscription -} - -// Updates returns the interior channel. -func (w *objWatcher) Updates() <-chan *ObjectInfo { - if w == nil { - return nil - } - return w.updates -} - -// Stop will unsubscribe from the watcher. -func (w *objWatcher) Stop() error { - if w == nil { - return nil - } - return w.sub.Unsubscribe() -} - -// Watch for changes in the underlying store and receive meta information updates. -func (obs *obs) Watch(opts ...WatchOpt) (ObjectWatcher, error) { - var o watchOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureWatcher(&o); err != nil { - return nil, err - } - } - } - - var initDoneMarker bool - - w := &objWatcher{updates: make(chan *ObjectInfo, 32)} - - update := func(m *Msg) { - var info ObjectInfo - if err := json.Unmarshal(m.Data, &info); err != nil { - return // TODO(dlc) - Communicate this upwards? 
- } - meta, err := m.Metadata() - if err != nil { - return - } - - if !o.ignoreDeletes || !info.Deleted { - info.ModTime = meta.Timestamp - w.updates <- &info - } - - // if UpdatesOnly is set, no not send nil to the channel - // as it would always be triggered after initializing the watcher - if !initDoneMarker && meta.NumPending == 0 { - initDoneMarker = true - w.updates <- nil - } - } - - allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name) - _, err := obs.js.GetLastMsg(obs.stream, allMeta) - // if there are no messages on the stream and we are not watching - // updates only, send nil to the channel to indicate that the initial - // watch is done - if !o.updatesOnly { - if errors.Is(err, ErrMsgNotFound) { - initDoneMarker = true - w.updates <- nil - } - } else { - // if UpdatesOnly was used, mark initialization as complete - initDoneMarker = true - } - - // Used ordered consumer to deliver results. - subOpts := []SubOpt{OrderedConsumer()} - if !o.includeHistory { - subOpts = append(subOpts, DeliverLastPerSubject()) - } - if o.updatesOnly { - subOpts = append(subOpts, DeliverNew()) - } - sub, err := obs.js.Subscribe(allMeta, update, subOpts...) - if err != nil { - return nil, err - } - w.sub = sub - return w, nil -} - -type ListObjectsOpt interface { - configureListObjects(opts *listObjectOpts) error -} -type listObjectOpts struct { - ctx context.Context - // Include deleted objects in the result channel. - showDeleted bool -} - -type listObjectsFn func(opts *listObjectOpts) error - -func (opt listObjectsFn) configureListObjects(opts *listObjectOpts) error { - return opt(opts) -} - -// ListObjectsShowDeleted makes ListObjects() return deleted objects. -func ListObjectsShowDeleted() ListObjectsOpt { - return listObjectsFn(func(opts *listObjectOpts) error { - opts.showDeleted = true - return nil - }) -} - -// For nats.Context() support. 
-func (ctx ContextOpt) configureListObjects(opts *listObjectOpts) error { - opts.ctx = ctx - return nil -} - -// List will list all the objects in this store. -func (obs *obs) List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) { - var o listObjectOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureListObjects(&o); err != nil { - return nil, err - } - } - } - watchOpts := make([]WatchOpt, 0) - if !o.showDeleted { - watchOpts = append(watchOpts, IgnoreDeletes()) - } - watcher, err := obs.Watch(watchOpts...) - if err != nil { - return nil, err - } - defer watcher.Stop() - if o.ctx == nil { - o.ctx = context.Background() - } - - var objs []*ObjectInfo - updates := watcher.Updates() -Updates: - for { - select { - case entry := <-updates: - if entry == nil { - break Updates - } - objs = append(objs, entry) - case <-o.ctx.Done(): - return nil, o.ctx.Err() - } - } - if len(objs) == 0 { - return nil, ErrNoObjectsFound - } - return objs, nil -} - -// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus -type ObjectBucketStatus struct { - nfo *StreamInfo - bucket string -} - -// Bucket is the name of the bucket -func (s *ObjectBucketStatus) Bucket() string { return s.bucket } - -// Description is the description supplied when creating the bucket -func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description } - -// TTL indicates how long objects are kept in the bucket -func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } - -// Storage indicates the underlying JetStream storage technology used to store data -func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage } - -// Replicas indicates how many storage replicas are kept for the data in the bucket -func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas } - -// Sealed indicates the stream is sealed and cannot be modified in any way -func (s *ObjectBucketStatus) Sealed() bool { 
return s.nfo.Config.Sealed } - -// Size is the combined size of all data in the bucket including metadata, in bytes -func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes } - -// BackingStore indicates what technology is used for storage of the bucket -func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" } - -// Metadata is the metadata supplied when creating the bucket -func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata } - -// StreamInfo is the stream info retrieved to create the status -func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo } - -// IsCompressed indicates if the data is compressed on disk -func (s *ObjectBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression } - -// Status retrieves run-time status about a bucket -func (obs *obs) Status() (ObjectStoreStatus, error) { - nfo, err := obs.js.StreamInfo(obs.stream) - if err != nil { - return nil, err - } - - status := &ObjectBucketStatus{ - nfo: nfo, - bucket: obs.name, - } - - return status, nil -} - -// Read impl. -func (o *objResult) Read(p []byte) (n int, err error) { - o.Lock() - defer o.Unlock() - readDeadline := time.Now().Add(o.readTimeout) - if ctx := o.ctx; ctx != nil { - if deadline, ok := ctx.Deadline(); ok { - readDeadline = deadline - } - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - o.err = ctx.Err() - } else { - o.err = ErrTimeout - } - default: - } - } - if o.err != nil { - return 0, o.err - } - if o.r == nil { - return 0, io.EOF - } - - r := o.r.(net.Conn) - r.SetReadDeadline(readDeadline) - n, err = r.Read(p) - if err, ok := err.(net.Error); ok && err.Timeout() { - if ctx := o.ctx; ctx != nil { - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - return 0, ctx.Err() - } else { - return 0, ErrTimeout - } - default: - err = nil - } - } - } - if err == io.EOF { - // Make sure the digest matches. 
- sha := o.digest.Sum(nil) - rsha, decodeErr := DecodeObjectDigest(o.info.Digest) - if decodeErr != nil { - o.err = decodeErr - return 0, o.err - } - if !bytes.Equal(sha[:], rsha) { - o.err = ErrDigestMismatch - return 0, o.err - } - } - return n, err -} - -// Close impl. -func (o *objResult) Close() error { - o.Lock() - defer o.Unlock() - if o.r == nil { - return nil - } - return o.r.Close() -} - -func (o *objResult) setErr(err error) { - o.Lock() - defer o.Unlock() - o.err = err -} - -func (o *objResult) Info() (*ObjectInfo, error) { - o.Lock() - defer o.Unlock() - return o.info, o.err -} - -func (o *objResult) Error() error { - o.Lock() - defer o.Unlock() - return o.err -} - -// ObjectStoreNames is used to retrieve a list of bucket names -func (js *js) ObjectStoreNames(opts ...ObjectOpt) <-chan string { - var o objOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureObject(&o); err != nil { - return nil - } - } - } - ch := make(chan string) - var cancel context.CancelFunc - if o.ctx == nil { - o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) - } - l := &streamLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") - l.js.opts.ctx = o.ctx - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - if !strings.HasPrefix(info.Config.Name, "OBJ_") { - continue - } - select { - case ch <- info.Config.Name: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} - -// ObjectStores is used to retrieve a list of bucket statuses -func (js *js) ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus { - var o objOpts - for _, opt := range opts { - if opt != nil { - if err := opt.configureObject(&o); err != nil { - return nil - } - } - } - ch := make(chan ObjectStoreStatus) - var cancel context.CancelFunc - if o.ctx == nil { - o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) - } - l := 
&streamLister{js: js} - l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") - l.js.opts.ctx = o.ctx - go func() { - if cancel != nil { - defer cancel() - } - defer close(ch) - for l.Next() { - for _, info := range l.Page() { - if !strings.HasPrefix(info.Config.Name, "OBJ_") { - continue - } - select { - case ch <- &ObjectBucketStatus{ - nfo: info, - bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), - }: - case <-o.ctx.Done(): - return - } - } - } - }() - - return ch -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/parser.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/parser.go deleted file mode 100644 index 70204e6..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/parser.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright 2012-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nats - -import ( - "fmt" -) - -type msgArg struct { - subject []byte - reply []byte - sid int64 - hdr int - size int -} - -const MAX_CONTROL_LINE_SIZE = 4096 - -type parseState struct { - state int - as int - drop int - hdr int - ma msgArg - argBuf []byte - msgBuf []byte - msgCopied bool - scratch [MAX_CONTROL_LINE_SIZE]byte -} - -const ( - OP_START = iota - OP_PLUS - OP_PLUS_O - OP_PLUS_OK - OP_MINUS - OP_MINUS_E - OP_MINUS_ER - OP_MINUS_ERR - OP_MINUS_ERR_SPC - MINUS_ERR_ARG - OP_M - OP_MS - OP_MSG - OP_MSG_SPC - MSG_ARG - MSG_PAYLOAD - MSG_END - OP_H - OP_P - OP_PI - OP_PIN - OP_PING - OP_PO - OP_PON - OP_PONG - OP_I - OP_IN - OP_INF - OP_INFO - OP_INFO_SPC - INFO_ARG -) - -// parse is the fast protocol parser engine. -func (nc *Conn) parse(buf []byte) error { - var i int - var b byte - - // Move to loop instead of range syntax to allow jumping of i - for i = 0; i < len(buf); i++ { - b = buf[i] - - switch nc.ps.state { - case OP_START: - switch b { - case 'M', 'm': - nc.ps.state = OP_M - nc.ps.hdr = -1 - nc.ps.ma.hdr = -1 - case 'H', 'h': - nc.ps.state = OP_H - nc.ps.hdr = 0 - nc.ps.ma.hdr = 0 - case 'P', 'p': - nc.ps.state = OP_P - case '+': - nc.ps.state = OP_PLUS - case '-': - nc.ps.state = OP_MINUS - case 'I', 'i': - nc.ps.state = OP_I - default: - goto parseErr - } - case OP_H: - switch b { - case 'M', 'm': - nc.ps.state = OP_M - default: - goto parseErr - } - case OP_M: - switch b { - case 'S', 's': - nc.ps.state = OP_MS - default: - goto parseErr - } - case OP_MS: - switch b { - case 'G', 'g': - nc.ps.state = OP_MSG - default: - goto parseErr - } - case OP_MSG: - switch b { - case ' ', '\t': - nc.ps.state = OP_MSG_SPC - default: - goto parseErr - } - case OP_MSG_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = MSG_ARG - nc.ps.as = i - } - case MSG_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - 
} - if err := nc.processMsgArgs(arg); err != nil { - return err - } - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD - - // jump ahead with the index. If this overruns - // what is left we fall out and process a split buffer. - i = nc.ps.as + nc.ps.ma.size - 1 - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - case MSG_PAYLOAD: - if nc.ps.msgBuf != nil { - if len(nc.ps.msgBuf) >= nc.ps.ma.size { - nc.processMsg(nc.ps.msgBuf) - nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END - } else { - // copy as much as we can to the buffer and skip ahead. - toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) - avail := len(buf) - i - - if avail < toCopy { - toCopy = avail - } - - if toCopy > 0 { - start := len(nc.ps.msgBuf) - // This is needed for copy to work. - nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] - copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) - // Update our index - i = (i + toCopy) - 1 - } else { - nc.ps.msgBuf = append(nc.ps.msgBuf, b) - } - } - } else if i-nc.ps.as >= nc.ps.ma.size { - nc.processMsg(buf[nc.ps.as:i]) - nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END - } - case MSG_END: - switch b { - case '\n': - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - continue - } - case OP_PLUS: - switch b { - case 'O', 'o': - nc.ps.state = OP_PLUS_O - default: - goto parseErr - } - case OP_PLUS_O: - switch b { - case 'K', 'k': - nc.ps.state = OP_PLUS_OK - default: - goto parseErr - } - case OP_PLUS_OK: - switch b { - case '\n': - nc.processOK() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_MINUS: - switch b { - case 'E', 'e': - nc.ps.state = OP_MINUS_E - default: - goto parseErr - } - case OP_MINUS_E: - switch b { - case 'R', 'r': - nc.ps.state = OP_MINUS_ER - default: - goto parseErr - } - case OP_MINUS_ER: - switch b { - case 'R', 'r': - nc.ps.state = OP_MINUS_ERR - default: - goto parseErr - } - case OP_MINUS_ERR: - switch b { - case ' 
', '\t': - nc.ps.state = OP_MINUS_ERR_SPC - default: - goto parseErr - } - case OP_MINUS_ERR_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = MINUS_ERR_ARG - nc.ps.as = i - } - case MINUS_ERR_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - nc.ps.argBuf = nil - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - nc.processErr(string(arg)) - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - case OP_P: - switch b { - case 'I', 'i': - nc.ps.state = OP_PI - case 'O', 'o': - nc.ps.state = OP_PO - default: - goto parseErr - } - case OP_PO: - switch b { - case 'N', 'n': - nc.ps.state = OP_PON - default: - goto parseErr - } - case OP_PON: - switch b { - case 'G', 'g': - nc.ps.state = OP_PONG - default: - goto parseErr - } - case OP_PONG: - switch b { - case '\n': - nc.processPong() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_PI: - switch b { - case 'N', 'n': - nc.ps.state = OP_PIN - default: - goto parseErr - } - case OP_PIN: - switch b { - case 'G', 'g': - nc.ps.state = OP_PING - default: - goto parseErr - } - case OP_PING: - switch b { - case '\n': - nc.processPing() - nc.ps.drop, nc.ps.state = 0, OP_START - } - case OP_I: - switch b { - case 'N', 'n': - nc.ps.state = OP_IN - default: - goto parseErr - } - case OP_IN: - switch b { - case 'F', 'f': - nc.ps.state = OP_INF - default: - goto parseErr - } - case OP_INF: - switch b { - case 'O', 'o': - nc.ps.state = OP_INFO - default: - goto parseErr - } - case OP_INFO: - switch b { - case ' ', '\t': - nc.ps.state = OP_INFO_SPC - default: - goto parseErr - } - case OP_INFO_SPC: - switch b { - case ' ', '\t': - continue - default: - nc.ps.state = INFO_ARG - nc.ps.as = i - } - case INFO_ARG: - switch b { - case '\r': - nc.ps.drop = 1 - case '\n': - var arg []byte - if nc.ps.argBuf != nil { - arg = nc.ps.argBuf - nc.ps.argBuf = 
nil - } else { - arg = buf[nc.ps.as : i-nc.ps.drop] - } - nc.processAsyncInfo(arg) - nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START - default: - if nc.ps.argBuf != nil { - nc.ps.argBuf = append(nc.ps.argBuf, b) - } - } - default: - goto parseErr - } - } - // Check for split buffer scenarios - if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil { - nc.ps.argBuf = nc.ps.scratch[:0] - nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) - // FIXME, check max len - } - // Check for split msg - if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { - // We need to clone the msgArg if it is still referencing the - // read buffer and we are not able to process the msg. - if nc.ps.argBuf == nil { - nc.cloneMsgArg() - } - - // If we will overflow the scratch buffer, just create a - // new buffer to hold the split message. - if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { - lrem := len(buf[nc.ps.as:]) - - nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) - copy(nc.ps.msgBuf, buf[nc.ps.as:]) - nc.ps.msgCopied = true - } else { - nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] - nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) - } - } - - return nil - -parseErr: - return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) -} - -// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but -// we need to hold onto it into the next read. -func (nc *Conn) cloneMsgArg() { - nc.ps.argBuf = nc.ps.scratch[:0] - nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) - nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) - nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] - if nc.ps.ma.reply != nil { - nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] - } -} - -const argsLenMax = 4 - -func (nc *Conn) processMsgArgs(arg []byte) error { - // Use separate function for header based messages. 
- if nc.ps.hdr >= 0 { - return nc.processHeaderMsgArgs(arg) - } - - // Unroll splitArgs to avoid runtime/heap issues - a := [argsLenMax][]byte{} - args := a[:0] - start := -1 - for i, b := range arg { - switch b { - case ' ', '\t', '\r', '\n': - if start >= 0 { - args = append(args, arg[start:i]) - start = -1 - } - default: - if start < 0 { - start = i - } - } - } - if start >= 0 { - args = append(args, arg[start:]) - } - - switch len(args) { - case 3: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = nil - nc.ps.ma.size = int(parseInt64(args[2])) - case 4: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = args[2] - nc.ps.ma.size = int(parseInt64(args[3])) - default: - return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) - } - if nc.ps.ma.sid < 0 { - return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) - } - if nc.ps.ma.size < 0 { - return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) - } - return nil -} - -// processHeaderMsgArgs is for a header based message. 
-func (nc *Conn) processHeaderMsgArgs(arg []byte) error { - // Unroll splitArgs to avoid runtime/heap issues - a := [argsLenMax][]byte{} - args := a[:0] - start := -1 - for i, b := range arg { - switch b { - case ' ', '\t', '\r', '\n': - if start >= 0 { - args = append(args, arg[start:i]) - start = -1 - } - default: - if start < 0 { - start = i - } - } - } - if start >= 0 { - args = append(args, arg[start:]) - } - - switch len(args) { - case 4: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = nil - nc.ps.ma.hdr = int(parseInt64(args[2])) - nc.ps.ma.size = int(parseInt64(args[3])) - case 5: - nc.ps.ma.subject = args[0] - nc.ps.ma.sid = parseInt64(args[1]) - nc.ps.ma.reply = args[2] - nc.ps.ma.hdr = int(parseInt64(args[3])) - nc.ps.ma.size = int(parseInt64(args[4])) - default: - return fmt.Errorf("nats: processHeaderMsgArgs Parse Error: '%s'", arg) - } - if nc.ps.ma.sid < 0 { - return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Sid: '%s'", arg) - } - if nc.ps.ma.hdr < 0 || nc.ps.ma.hdr > nc.ps.ma.size { - return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Header Size: '%s'", arg) - } - if nc.ps.ma.size < 0 { - return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Size: '%s'", arg) - } - return nil -} - -// ASCII numbers 0-9 -const ( - ascii_0 = 48 - ascii_9 = 57 -) - -// parseInt64 expects decimal positive numbers. 
We -// return -1 to signal error -func parseInt64(d []byte) (n int64) { - if len(d) == 0 { - return -1 - } - for _, dec := range d { - if dec < ascii_0 || dec > ascii_9 { - return -1 - } - n = n*10 + (int64(dec) - ascii_0) - } - return n -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/rand.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/rand.go deleted file mode 100644 index 0cdee0a..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/rand.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.20 -// +build !go1.20 - -// A Go client for the NATS messaging system (https://nats.io). -package nats - -import ( - "math/rand" - "time" -) - -func init() { - // This is not needed since Go 1.20 because now rand.Seed always happens - // by default (uses runtime.fastrand64 instead as source). 
- rand.Seed(time.Now().UnixNano()) -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/testing_internal.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/testing_internal.go deleted file mode 100644 index 1839702..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/testing_internal.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build internal_testing -// +build internal_testing - -// Functions in this file are only available when building nats.go with the -// internal_testing build tag. They are used by the nats.go test suite. -package nats - -// AddMsgFilter adds a message filter for the given subject -// to the connection. The filter will be called for each -// message received on the subject. If the filter returns -// nil, the message will be dropped. -func (nc *Conn) AddMsgFilter(subject string, filter msgFilter) { - nc.subsMu.Lock() - defer nc.subsMu.Unlock() - - if nc.filters == nil { - nc.filters = make(map[string]msgFilter) - } - nc.filters[subject] = filter -} - -// RemoveMsgFilter removes a message filter for the given subject. 
-func (nc *Conn) RemoveMsgFilter(subject string) { - nc.subsMu.Lock() - defer nc.subsMu.Unlock() - - if nc.filters != nil { - delete(nc.filters, subject) - if len(nc.filters) == 0 { - nc.filters = nil - } - } -} - -// IsJSControlMessage returns true if the message is a JetStream control message. -func IsJSControlMessage(msg *Msg) (bool, int) { - return isJSControlMessage(msg) -} - -// CloseTCPConn closes the underlying TCP connection. -// It can be used to simulate a disconnect. -func (nc *Conn) CloseTCPConn() { - nc.mu.Lock() - defer nc.mu.Unlock() - nc.conn.Close() -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/timer.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/timer.go deleted file mode 100644 index 6edeb4c..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/timer.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nats - -import ( - "sync" - "time" -) - -// global pool of *time.Timer's. can be used by multiple goroutines concurrently. -var globalTimerPool timerPool - -// timerPool provides GC-able pooling of *time.Timer's. -// can be used by multiple goroutines concurrently. -type timerPool struct { - p sync.Pool -} - -// Get returns a timer that completes after the given duration. 
-func (tp *timerPool) Get(d time.Duration) *time.Timer { - if t, ok := tp.p.Get().(*time.Timer); ok && t != nil { - t.Reset(d) - return t - } - - return time.NewTimer(d) -} - -// Put pools the given timer. -// -// There is no need to call t.Stop() before calling Put. -// -// Put will try to stop the timer before pooling. If the -// given timer already expired, Put will read the unreceived -// value if there is one. -func (tp *timerPool) Put(t *time.Timer) { - if !t.Stop() { - select { - case <-t.C: - default: - } - } - - tp.p.Put(t) -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls.go deleted file mode 100644 index af9f51f..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.8 -// +build go1.8 - -package util - -import "crypto/tls" - -// CloneTLSConfig returns a copy of c. 
-func CloneTLSConfig(c *tls.Config) *tls.Config { - if c == nil { - return &tls.Config{} - } - - return c.Clone() -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls_go17.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls_go17.go deleted file mode 100644 index 44d46b4..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/util/tls_go17.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2016-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.7 && !go1.8 -// +build go1.7,!go1.8 - -package util - -import ( - "crypto/tls" -) - -// CloneTLSConfig returns a copy of c. Only the exported fields are copied. -// This is temporary, until this is provided by the language. 
-// https://go-review.googlesource.com/#/c/28075/ -func CloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nats.go/ws.go b/backend/services/controller/vendor/github.com/nats-io/nats.go/ws.go deleted file mode 100644 index 2c2d421..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nats.go/ws.go +++ /dev/null @@ -1,780 +0,0 @@ -// Copyright 2021-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nats - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "encoding/binary" - "errors" - "fmt" - "io" - mrand "math/rand" - "net/http" - "net/url" - "strings" - "time" - "unicode/utf8" - - "github.com/klauspost/compress/flate" -) - -type wsOpCode int - -const ( - // From https://tools.ietf.org/html/rfc6455#section-5.2 - wsTextMessage = wsOpCode(1) - wsBinaryMessage = wsOpCode(2) - wsCloseMessage = wsOpCode(8) - wsPingMessage = wsOpCode(9) - wsPongMessage = wsOpCode(10) - - wsFinalBit = 1 << 7 - wsRsv1Bit = 1 << 6 // Used for compression, from https://tools.ietf.org/html/rfc7692#section-6 - wsRsv2Bit = 1 << 5 - wsRsv3Bit = 1 << 4 - - wsMaskBit = 1 << 7 - - wsContinuationFrame = 0 - wsMaxFrameHeaderSize = 14 - wsMaxControlPayloadSize = 125 - wsCloseSatusSize = 2 - - // From https://tools.ietf.org/html/rfc6455#section-11.7 - wsCloseStatusNormalClosure = 1000 - wsCloseStatusNoStatusReceived = 1005 - wsCloseStatusAbnormalClosure = 1006 - wsCloseStatusInvalidPayloadData = 1007 - - wsScheme = "ws" - wsSchemeTLS = "wss" - - wsPMCExtension = "permessage-deflate" // per-message compression - wsPMCSrvNoCtx = "server_no_context_takeover" - wsPMCCliNoCtx = "client_no_context_takeover" - wsPMCReqHeaderValue = wsPMCExtension + "; " + wsPMCSrvNoCtx + "; " + wsPMCCliNoCtx -) - -// From https://tools.ietf.org/html/rfc6455#section-1.3 -var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff} - -type websocketReader struct { - r io.Reader - pending [][]byte - ib []byte - ff bool - fc bool - nl bool - dc *wsDecompressor - nc *Conn -} - -type wsDecompressor struct { - flate io.ReadCloser - bufs [][]byte - off int -} - -type websocketWriter struct { - w io.Writer - compress bool - compressor *flate.Writer - ctrlFrames [][]byte // pending frames that should be sent at the next Write() - cm []byte // close message that needs to be sent when everything else 
has been sent - cmDone bool // a close message has been added or sent (never going back to false) - noMoreSend bool // if true, even if there is a Write() call, we should not send anything -} - -func (d *wsDecompressor) Read(dst []byte) (int, error) { - if len(dst) == 0 { - return 0, nil - } - if len(d.bufs) == 0 { - return 0, io.EOF - } - copied := 0 - rem := len(dst) - for buf := d.bufs[0]; buf != nil && rem > 0; { - n := len(buf[d.off:]) - if n > rem { - n = rem - } - copy(dst[copied:], buf[d.off:d.off+n]) - copied += n - rem -= n - d.off += n - buf = d.nextBuf() - } - return copied, nil -} - -func (d *wsDecompressor) nextBuf() []byte { - // We still have remaining data in the first buffer - if d.off != len(d.bufs[0]) { - return d.bufs[0] - } - // We read the full first buffer. Reset offset. - d.off = 0 - // We were at the last buffer, so we are done. - if len(d.bufs) == 1 { - d.bufs = nil - return nil - } - // Here we move to the next buffer. - d.bufs = d.bufs[1:] - return d.bufs[0] -} - -func (d *wsDecompressor) ReadByte() (byte, error) { - if len(d.bufs) == 0 { - return 0, io.EOF - } - b := d.bufs[0][d.off] - d.off++ - d.nextBuf() - return b, nil -} - -func (d *wsDecompressor) addBuf(b []byte) { - d.bufs = append(d.bufs, b) -} - -func (d *wsDecompressor) decompress() ([]byte, error) { - d.off = 0 - // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 - // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader - // does not report unexpected EOF. - d.bufs = append(d.bufs, compressFinalBlock) - // Create or reset the decompressor with his object (wsDecompressor) - // that provides Read() and ReadByte() APIs that will consume from - // the compressed buffers (d.bufs). 
- if d.flate == nil { - d.flate = flate.NewReader(d) - } else { - d.flate.(flate.Resetter).Reset(d, nil) - } - b, err := io.ReadAll(d.flate) - // Now reset the compressed buffers list - d.bufs = nil - return b, err -} - -func wsNewReader(r io.Reader) *websocketReader { - return &websocketReader{r: r, ff: true} -} - -// From now on, reads will be from the readLoop and we will need to -// acquire the connection lock should we have to send/write a control -// message from handleControlFrame. -// -// Note: this runs under the connection lock. -func (r *websocketReader) doneWithConnect() { - r.nl = true -} - -func (r *websocketReader) Read(p []byte) (int, error) { - var err error - var buf []byte - - if l := len(r.ib); l > 0 { - buf = r.ib - r.ib = nil - } else { - if len(r.pending) > 0 { - return r.drainPending(p), nil - } - - // Get some data from the underlying reader. - n, err := r.r.Read(p) - if err != nil { - return 0, err - } - buf = p[:n] - } - - // Now parse this and decode frames. We will possibly read more to - // ensure that we get a full frame. 
- var ( - tmpBuf []byte - pos int - max = len(buf) - rem = 0 - ) - for pos < max { - b0 := buf[pos] - frameType := wsOpCode(b0 & 0xF) - final := b0&wsFinalBit != 0 - compressed := b0&wsRsv1Bit != 0 - pos++ - - tmpBuf, pos, err = wsGet(r.r, buf, pos, 1) - if err != nil { - return 0, err - } - b1 := tmpBuf[0] - - // Store size in case it is < 125 - rem = int(b1 & 0x7F) - - switch frameType { - case wsPingMessage, wsPongMessage, wsCloseMessage: - if rem > wsMaxControlPayloadSize { - return 0, fmt.Errorf( - fmt.Sprintf("control frame length bigger than maximum allowed of %v bytes", - wsMaxControlPayloadSize)) - } - if compressed { - return 0, errors.New("control frame should not be compressed") - } - if !final { - return 0, errors.New("control frame does not have final bit set") - } - case wsTextMessage, wsBinaryMessage: - if !r.ff { - return 0, errors.New("new message started before final frame for previous message was received") - } - r.ff = final - r.fc = compressed - case wsContinuationFrame: - // Compressed bit must be only set in the first frame - if r.ff || compressed { - return 0, errors.New("invalid continuation frame") - } - r.ff = final - default: - return 0, fmt.Errorf("unknown opcode %v", frameType) - } - - // If the encoded size is <= 125, then `rem` is simply the remainder size of the - // frame. If it is 126, then the actual size is encoded as a uint16. For larger - // frames, `rem` will initially be 127 and the actual size is encoded as a uint64. - switch rem { - case 126: - tmpBuf, pos, err = wsGet(r.r, buf, pos, 2) - if err != nil { - return 0, err - } - rem = int(binary.BigEndian.Uint16(tmpBuf)) - case 127: - tmpBuf, pos, err = wsGet(r.r, buf, pos, 8) - if err != nil { - return 0, err - } - rem = int(binary.BigEndian.Uint64(tmpBuf)) - } - - // Handle control messages in place... 
- if wsIsControlFrame(frameType) { - pos, err = r.handleControlFrame(frameType, buf, pos, rem) - if err != nil { - return 0, err - } - rem = 0 - continue - } - - var b []byte - // This ensures that we get the full payload for this frame. - b, pos, err = wsGet(r.r, buf, pos, rem) - if err != nil { - return 0, err - } - // We read the full frame. - rem = 0 - addToPending := true - if r.fc { - // Don't add to pending if we are not dealing with the final frame. - addToPending = r.ff - // Add the compressed payload buffer to the list. - r.addCBuf(b) - // Decompress only when this is the final frame. - if r.ff { - b, err = r.dc.decompress() - if err != nil { - return 0, err - } - r.fc = false - } - } - // Add to the pending list if dealing with uncompressed frames or - // after we have received the full compressed message and decompressed it. - if addToPending { - r.pending = append(r.pending, b) - } - } - // In case of compression, there may be nothing to drain - if len(r.pending) > 0 { - return r.drainPending(p), nil - } - return 0, nil -} - -func (r *websocketReader) addCBuf(b []byte) { - if r.dc == nil { - r.dc = &wsDecompressor{} - } - // Add a copy of the incoming buffer to the list of compressed buffers. - r.dc.addBuf(append([]byte(nil), b...)) -} - -func (r *websocketReader) drainPending(p []byte) int { - var n int - var max = len(p) - - for i, buf := range r.pending { - if n+len(buf) <= max { - copy(p[n:], buf) - n += len(buf) - } else { - // Is there room left? - if n < max { - // Write the partial and update this slice. - rem := max - n - copy(p[n:], buf[:rem]) - n += rem - r.pending[i] = buf[rem:] - } - // These are the remaining slices that will need to be used at - // the next Read() call. 
- r.pending = r.pending[i:] - return n - } - } - r.pending = r.pending[:0] - return n -} - -func wsGet(r io.Reader, buf []byte, pos, needed int) ([]byte, int, error) { - avail := len(buf) - pos - if avail >= needed { - return buf[pos : pos+needed], pos + needed, nil - } - b := make([]byte, needed) - start := copy(b, buf[pos:]) - for start != needed { - n, err := r.Read(b[start:cap(b)]) - start += n - if err != nil { - return b, start, err - } - } - return b, pos + avail, nil -} - -func (r *websocketReader) handleControlFrame(frameType wsOpCode, buf []byte, pos, rem int) (int, error) { - var payload []byte - var err error - - if rem > 0 { - payload, pos, err = wsGet(r.r, buf, pos, rem) - if err != nil { - return pos, err - } - } - switch frameType { - case wsCloseMessage: - status := wsCloseStatusNoStatusReceived - var body string - lp := len(payload) - // If there is a payload, the status is represented as a 2-byte - // unsigned integer (in network byte order). Then, there may be an - // optional body. - hasStatus, hasBody := lp >= wsCloseSatusSize, lp > wsCloseSatusSize - if hasStatus { - // Decode the status - status = int(binary.BigEndian.Uint16(payload[:wsCloseSatusSize])) - // Now if there is a body, capture it and make sure this is a valid UTF-8. - if hasBody { - body = string(payload[wsCloseSatusSize:]) - if !utf8.ValidString(body) { - // https://tools.ietf.org/html/rfc6455#section-5.5.1 - // If body is present, it must be a valid utf8 - status = wsCloseStatusInvalidPayloadData - body = "invalid utf8 body in close frame" - } - } - } - r.nc.wsEnqueueCloseMsg(r.nl, status, body) - // Return io.EOF so that readLoop will close the connection as client closed - // after processing pending buffers. - return pos, io.EOF - case wsPingMessage: - r.nc.wsEnqueueControlMsg(r.nl, wsPongMessage, payload) - case wsPongMessage: - // Nothing to do.. 
- } - return pos, nil -} - -func (w *websocketWriter) Write(p []byte) (int, error) { - if w.noMoreSend { - return 0, nil - } - var total int - var n int - var err error - // If there are control frames, they can be sent now. Actually spec says - // that they should be sent ASAP, so we will send before any application data. - if len(w.ctrlFrames) > 0 { - n, err = w.writeCtrlFrames() - if err != nil { - return n, err - } - total += n - } - // Do the following only if there is something to send. - // We will end with checking for need to send close message. - if len(p) > 0 { - if w.compress { - buf := &bytes.Buffer{} - if w.compressor == nil { - w.compressor, _ = flate.NewWriter(buf, flate.BestSpeed) - } else { - w.compressor.Reset(buf) - } - if n, err = w.compressor.Write(p); err != nil { - return n, err - } - if err = w.compressor.Flush(); err != nil { - return n, err - } - b := buf.Bytes() - p = b[:len(b)-4] - } - fh, key := wsCreateFrameHeader(w.compress, wsBinaryMessage, len(p)) - wsMaskBuf(key, p) - n, err = w.w.Write(fh) - total += n - if err == nil { - n, err = w.w.Write(p) - total += n - } - } - if err == nil && w.cm != nil { - n, err = w.writeCloseMsg() - total += n - } - return total, err -} - -func (w *websocketWriter) writeCtrlFrames() (int, error) { - var ( - n int - total int - i int - err error - ) - for ; i < len(w.ctrlFrames); i++ { - buf := w.ctrlFrames[i] - n, err = w.w.Write(buf) - total += n - if err != nil { - break - } - } - if i != len(w.ctrlFrames) { - w.ctrlFrames = w.ctrlFrames[i+1:] - } else { - w.ctrlFrames = w.ctrlFrames[:0] - } - return total, err -} - -func (w *websocketWriter) writeCloseMsg() (int, error) { - n, err := w.w.Write(w.cm) - w.cm, w.noMoreSend = nil, true - return n, err -} - -func wsMaskBuf(key, buf []byte) { - for i := 0; i < len(buf); i++ { - buf[i] ^= key[i&3] - } -} - -// Create the frame header. -// Encodes the frame type and optional compression flag, and the size of the payload. 
-func wsCreateFrameHeader(compressed bool, frameType wsOpCode, l int) ([]byte, []byte) { - fh := make([]byte, wsMaxFrameHeaderSize) - n, key := wsFillFrameHeader(fh, compressed, frameType, l) - return fh[:n], key -} - -func wsFillFrameHeader(fh []byte, compressed bool, frameType wsOpCode, l int) (int, []byte) { - var n int - b := byte(frameType) - b |= wsFinalBit - if compressed { - b |= wsRsv1Bit - } - b1 := byte(wsMaskBit) - switch { - case l <= 125: - n = 2 - fh[0] = b - fh[1] = b1 | byte(l) - case l < 65536: - n = 4 - fh[0] = b - fh[1] = b1 | 126 - binary.BigEndian.PutUint16(fh[2:], uint16(l)) - default: - n = 10 - fh[0] = b - fh[1] = b1 | 127 - binary.BigEndian.PutUint64(fh[2:], uint64(l)) - } - var key []byte - var keyBuf [4]byte - if _, err := io.ReadFull(rand.Reader, keyBuf[:4]); err != nil { - kv := mrand.Int31() - binary.LittleEndian.PutUint32(keyBuf[:4], uint32(kv)) - } - copy(fh[n:], keyBuf[:4]) - key = fh[n : n+4] - n += 4 - return n, key -} - -func (nc *Conn) wsInitHandshake(u *url.URL) error { - compress := nc.Opts.Compression - tlsRequired := u.Scheme == wsSchemeTLS || nc.Opts.Secure || nc.Opts.TLSConfig != nil || nc.Opts.TLSCertCB != nil || nc.Opts.RootCAsCB != nil - // Do TLS here as needed. - if tlsRequired { - if err := nc.makeTLSConn(); err != nil { - return err - } - } else { - nc.bindToNewConn() - } - - var err error - - // For http request, we need the passed URL to contain either http or https scheme. 
- scheme := "http" - if tlsRequired { - scheme = "https" - } - ustr := fmt.Sprintf("%s://%s", scheme, u.Host) - - if nc.Opts.ProxyPath != "" { - proxyPath := nc.Opts.ProxyPath - if !strings.HasPrefix(proxyPath, "/") { - proxyPath = "/" + proxyPath - } - ustr += proxyPath - } - - u, err = url.Parse(ustr) - if err != nil { - return err - } - req := &http.Request{ - Method: "GET", - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - wsKey, err := wsMakeChallengeKey() - if err != nil { - return err - } - - req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{wsKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if compress { - req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue) - } - if err := req.Write(nc.conn); err != nil { - return err - } - - var resp *http.Response - - br := bufio.NewReaderSize(nc.conn, 4096) - nc.conn.SetReadDeadline(time.Now().Add(nc.Opts.Timeout)) - resp, err = http.ReadResponse(br, req) - if err == nil && - (resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) { - - err = fmt.Errorf("invalid websocket connection") - } - // Check compression extension... - if err == nil && compress { - // Check that not only permessage-deflate extension is present, but that - // we also have server and client no context take over. - srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header) - - // If server does not support compression, then simply disable it in our side. 
- if !srvCompress { - compress = false - } else if !noCtxTakeover { - err = fmt.Errorf("compression negotiation error") - } - } - if resp != nil { - resp.Body.Close() - } - nc.conn.SetReadDeadline(time.Time{}) - if err != nil { - return err - } - - wsr := wsNewReader(nc.br.r) - wsr.nc = nc - // We have to slurp whatever is in the bufio reader and copy to br.r - if n := br.Buffered(); n != 0 { - wsr.ib, _ = br.Peek(n) - } - nc.br.r = wsr - nc.bw.w = &websocketWriter{w: nc.bw.w, compress: compress} - nc.ws = true - return nil -} - -func (nc *Conn) wsClose() { - nc.mu.Lock() - defer nc.mu.Unlock() - if !nc.ws { - return - } - nc.wsEnqueueCloseMsgLocked(wsCloseStatusNormalClosure, _EMPTY_) -} - -func (nc *Conn) wsEnqueueCloseMsg(needsLock bool, status int, payload string) { - // In some low-level unit tests it will happen... - if nc == nil { - return - } - if needsLock { - nc.mu.Lock() - defer nc.mu.Unlock() - } - nc.wsEnqueueCloseMsgLocked(status, payload) -} - -func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) { - wr, ok := nc.bw.w.(*websocketWriter) - if !ok || wr.cmDone { - return - } - statusAndPayloadLen := 2 + len(payload) - frame := make([]byte, 2+4+statusAndPayloadLen) - n, key := wsFillFrameHeader(frame, false, wsCloseMessage, statusAndPayloadLen) - // Set the status - binary.BigEndian.PutUint16(frame[n:], uint16(status)) - // If there is a payload, copy - if len(payload) > 0 { - copy(frame[n+2:], payload) - } - // Mask status + payload - wsMaskBuf(key, frame[n:n+statusAndPayloadLen]) - wr.cm = frame - wr.cmDone = true - nc.bw.flush() - if c := wr.compressor; c != nil { - c.Close() - } -} - -func (nc *Conn) wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload []byte) { - // In some low-level unit tests it will happen... 
- if nc == nil { - return - } - if needsLock { - nc.mu.Lock() - defer nc.mu.Unlock() - } - wr, ok := nc.bw.w.(*websocketWriter) - if !ok { - return - } - fh, key := wsCreateFrameHeader(false, frameType, len(payload)) - wr.ctrlFrames = append(wr.ctrlFrames, fh) - if len(payload) > 0 { - wsMaskBuf(key, payload) - wr.ctrlFrames = append(wr.ctrlFrames, payload) - } - nc.bw.flush() -} - -func wsPMCExtensionSupport(header http.Header) (bool, bool) { - for _, extensionList := range header["Sec-Websocket-Extensions"] { - extensions := strings.Split(extensionList, ",") - for _, extension := range extensions { - extension = strings.Trim(extension, " \t") - params := strings.Split(extension, ";") - for i, p := range params { - p = strings.Trim(p, " \t") - if strings.EqualFold(p, wsPMCExtension) { - var snc bool - var cnc bool - for j := i + 1; j < len(params); j++ { - p = params[j] - p = strings.Trim(p, " \t") - if strings.EqualFold(p, wsPMCSrvNoCtx) { - snc = true - } else if strings.EqualFold(p, wsPMCCliNoCtx) { - cnc = true - } - if snc && cnc { - return true, true - } - } - return true, false - } - } - } - } - return false, false -} - -func wsMakeChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -func wsAcceptKey(key string) string { - h := sha1.New() - h.Write([]byte(key)) - h.Write(wsGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -// Returns true if the op code corresponds to a control frame. 
-func wsIsControlFrame(frameType wsOpCode) bool { - return frameType >= wsCloseMessage -} - -func isWebsocketScheme(u *url.URL) bool { - return u.Scheme == wsScheme || u.Scheme == wsSchemeTLS -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/.gitignore b/backend/services/controller/vendor/github.com/nats-io/nkeys/.gitignore deleted file mode 100644 index d23676d..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib -build/ - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ -.idea/ diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/.goreleaser.yml b/backend/services/controller/vendor/github.com/nats-io/nkeys/.goreleaser.yml deleted file mode 100644 index e5c4f15..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/.goreleaser.yml +++ /dev/null @@ -1,63 +0,0 @@ -project_name: nkeys -release: - github: - owner: nats-io - name: nkeys - name_template: '{{.Tag}}' - draft: true -builds: - - id: nk - main: ./nk/main.go - ldflags: "-X main.Version={{.Tag}}_{{.Commit}}" - binary: nk - goos: - - darwin - - linux - - windows - - freebsd - goarch: - - amd64 - - arm - - arm64 - - 386 - - mips64le - - s390x - goarm: - - 6 - - 7 - ignore: - - goos: darwin - goarch: 386 - - goos: freebsd - goarch: arm - - goos: freebsd - goarch: arm64 - - goos: freebsd - goarch: 386 - -dist: build - -archives: - - name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm - }}v{{ .Arm }}{{ end }}' - wrap_in_directory: true - format: zip - files: - - README.md - - LICENSE - -checksum: - name_template: '{{ .ProjectName }}-v{{ .Version }}-checksums.txt' - -snapshot: - name_template: 'dev' - -nfpms: - - 
file_name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Arch }}{{ if .Arm - }}v{{ .Arm }}{{ end }}' - maintainer: nats.io - description: NKeys utility cli program - vendor: nats-io - bindir: /usr/local/bin - formats: - - deb \ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/GOVERNANCE.md b/backend/services/controller/vendor/github.com/nats-io/nkeys/GOVERNANCE.md deleted file mode 100644 index 744d3bc..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/GOVERNANCE.md +++ /dev/null @@ -1,3 +0,0 @@ -# NATS NKEYS Governance - -NATS NKEYS is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/LICENSE b/backend/services/controller/vendor/github.com/nats-io/nkeys/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/MAINTAINERS.md b/backend/services/controller/vendor/github.com/nats-io/nkeys/MAINTAINERS.md deleted file mode 100644 index 2321465..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/MAINTAINERS.md +++ /dev/null @@ -1,8 +0,0 @@ -# Maintainers - -Maintainership is on a per project basis. - -### Maintainers - - Derek Collison [@derekcollison](https://github.com/derekcollison) - - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) - - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/README.md b/backend/services/controller/vendor/github.com/nats-io/nkeys/README.md deleted file mode 100644 index 17e3a8e..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# NKEYS - -[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) -[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/nkeys)](https://goreportcard.com/report/github.com/nats-io/nkeys) -[![Build Status](https://github.com/nats-io/nkeys/actions/workflows/release.yaml/badge.svg)](https://github.com/nats-io/nkeys/actions/workflows/release.yaml/badge.svg) 
-[![GoDoc](https://godoc.org/github.com/nats-io/nkeys?status.svg)](https://godoc.org/github.com/nats-io/nkeys) -[![Coverage Status](https://coveralls.io/repos/github/nats-io/nkeys/badge.svg?branch=main&service=github)](https://coveralls.io/github/nats-io/nkeys?branch=main) - -A public-key signature system based on [Ed25519](https://ed25519.cr.yp.to/) for the NATS ecosystem. - -## About - -The NATS ecosystem will be moving to [Ed25519](https://ed25519.cr.yp.to/) keys for identity, authentication and authorization for entities such as Accounts, Users, Servers and Clusters. - -Ed25519 is fast and resistant to side channel attacks. Generation of a seed key is all that is needed to be stored and kept safe, as the seed can generate both the public and private keys. - -The NATS system will utilize Ed25519 keys, meaning that NATS systems will never store or even have access to any private keys. Authentication will utilize a random challenge response mechanism. - -Dealing with 32 byte and 64 byte raw keys can be challenging. NKEYS is designed to formulate keys in a much friendlier fashion and references work done in cryptocurrencies, specifically [Stellar](https://www.stellar.org/). Bitcoin and others used a form of Base58 (or Base58Check) to encode raw keys. Stellar utilized a more traditional Base32 with a CRC16 and a version or prefix byte. NKEYS utilizes a similar format where the prefix will be 1 byte for public and private keys and will be 2 bytes for seeds. The base32 encoding of these prefixes will yield friendly human readable prefixes, e.g. '**N**' = server, '**C**' = cluster, '**O**' = operator, '**A**' = account, and '**U**' = user. '**P**' is used for private keys. For seeds, the first encoded prefix is '**S**', and the second character will be the type for the public key, e.g. "**SU**" is a seed for a user key pair, "**SA**" is a seed for an account key pair. 
- -## Installation - -Use the `go` command: - - $ go get github.com/nats-io/nkeys - -## nk - Command Line Utility - -Located under the nk [directory](https://github.com/nats-io/nkeys/tree/master/nk). - -## Basic API Usage -```go - -// Create a new User KeyPair -user, _ := nkeys.CreateUser() - -// Sign some data with a full key pair user. -data := []byte("Hello World") -sig, _ := user.Sign(data) - -// Verify the signature. -err = user.Verify(data, sig) - -// Access the seed, the only thing that needs to be stored and kept safe. -// seed = "SUAKYRHVIOREXV7EUZTBHUHL7NUMHPMAS7QMDU3GTIUWEI5LDNOXD43IZY" -seed, _ := user.Seed() - -// Access the public key which can be shared. -// publicKey = "UD466L6EBCM3YY5HEGHJANNTN4LSKTSUXTH7RILHCKEQMQHTBNLHJJXT" -publicKey, _ := user.PublicKey() - -// Create a full User who can sign and verify from a private seed. -user, _ = nkeys.FromSeed(seed) - -// Create a User who can only verify signatures via a public key. -user, _ = nkeys.FromPublicKey(publicKey) - -// Create a User KeyPair with our own random data. -var rawSeed [32]byte -_, err := io.ReadFull(rand.Reader, rawSeed[:]) // Or some other random source. -user2, _ := nkeys.FromRawSeed(PrefixByteUser, rawSeed) - -``` - -## License - -Unless otherwise noted, the NATS source files are distributed -under the Apache Version 2.0 license found in the LICENSE file. diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/TODO.md b/backend/services/controller/vendor/github.com/nats-io/nkeys/TODO.md deleted file mode 100644 index 2649c9e..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/TODO.md +++ /dev/null @@ -1,5 +0,0 @@ - -# General - -- [ ] Child key derivation -- [ ] Hardware support, e.g. 
YubiHSM diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/crc16.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/crc16.go deleted file mode 100644 index fbe38fb..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/crc16.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -// An implementation of crc16 according to CCITT standards for XMODEM. 
- -var crc16tab = [256]uint16{ - 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, - 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, - 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, - 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, - 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, - 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, - 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, - 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, - 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, - 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, - 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, - 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, - 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, - 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, - 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, - 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, - 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, - 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, - 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, - 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, - 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, - 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, - 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, - 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, - 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, - 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, - 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, - 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, - 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, - 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 
0x1ce0, 0x0cc1, - 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, - 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, -} - -// crc16 returns the 2-byte crc for the data provided. -func crc16(data []byte) uint16 { - var crc uint16 - for _, b := range data { - crc = ((crc << 8) & 0xffff) ^ crc16tab[((crc>>8)^uint16(b))&0x00FF] - } - return crc -} - -// validate will check the calculated crc16 checksum for data against the expected. -func validate(data []byte, expected uint16) error { - if crc16(data) != expected { - return ErrInvalidChecksum - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/creds_utils.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/creds_utils.go deleted file mode 100644 index ecd9463..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/creds_utils.go +++ /dev/null @@ -1,78 +0,0 @@ -package nkeys - -import ( - "bytes" - "regexp" - "strings" -) - -var userConfigRE = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}\r?\n))`) - -// ParseDecoratedJWT takes a creds file and returns the JWT portion. -func ParseDecoratedJWT(contents []byte) (string, error) { - items := userConfigRE.FindAllSubmatch(contents, -1) - if len(items) == 0 { - return string(contents), nil - } - // First result should be the user JWT. - // We copy here so that if the file contained a seed file too we wipe appropriately. - raw := items[0][1] - tmp := make([]byte, len(raw)) - copy(tmp, raw) - return strings.TrimSpace(string(tmp)), nil -} - -// ParseDecoratedNKey takes a creds file, finds the NKey portion and creates a -// key pair from it. 
-func ParseDecoratedNKey(contents []byte) (KeyPair, error) { - var seed []byte - - items := userConfigRE.FindAllSubmatch(contents, -1) - if len(items) > 1 { - seed = items[1][1] - } else { - lines := bytes.Split(contents, []byte("\n")) - for _, line := range lines { - if bytes.HasPrefix(bytes.TrimSpace(line), []byte("SO")) || - bytes.HasPrefix(bytes.TrimSpace(line), []byte("SA")) || - bytes.HasPrefix(bytes.TrimSpace(line), []byte("SU")) { - seed = line - break - } - } - } - if seed == nil { - return nil, ErrNoSeedFound - } - if !bytes.HasPrefix(seed, []byte("SO")) && - !bytes.HasPrefix(seed, []byte("SA")) && - !bytes.HasPrefix(seed, []byte("SU")) { - return nil, ErrInvalidNkeySeed - } - kp, err := FromSeed(seed) - if err != nil { - return nil, err - } - return kp, nil -} - -// ParseDecoratedUserNKey takes a creds file, finds the NKey portion and creates a -// key pair from it. Similar to ParseDecoratedNKey but fails for non-user keys. -func ParseDecoratedUserNKey(contents []byte) (KeyPair, error) { - nk, err := ParseDecoratedNKey(contents) - if err != nil { - return nil, err - } - seed, err := nk.Seed() - if err != nil { - return nil, err - } - if !bytes.HasPrefix(seed, []byte("SU")) { - return nil, ErrInvalidUserSeed - } - kp, err := FromSeed(seed) - if err != nil { - return nil, err - } - return kp, nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/dependencies.md b/backend/services/controller/vendor/github.com/nats-io/nkeys/dependencies.md deleted file mode 100644 index 370184a..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/dependencies.md +++ /dev/null @@ -1,12 +0,0 @@ -# External Dependencies - -This file lists the dependencies used in this repository. 
- -| Dependency | License | -|-|-| -| Go | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/crypto v0.3.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/net v0.2.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/sys v0.2.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/term v0.2.0 | BSD 3-Clause "New" or "Revised" License | -| golang.org/x/text v0.4.0 | BSD 3-Clause "New" or "Revised" License | diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/errors.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/errors.go deleted file mode 100644 index a30bb96..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/errors.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nkeys - -// Errors -const ( - ErrInvalidPrefixByte = nkeysError("nkeys: invalid prefix byte") - ErrInvalidKey = nkeysError("nkeys: invalid key") - ErrInvalidPublicKey = nkeysError("nkeys: invalid public key") - ErrInvalidPrivateKey = nkeysError("nkeys: invalid private key") - ErrInvalidSeedLen = nkeysError("nkeys: invalid seed length") - ErrInvalidSeed = nkeysError("nkeys: invalid seed") - ErrInvalidEncoding = nkeysError("nkeys: invalid encoded key") - ErrInvalidSignature = nkeysError("nkeys: signature verification failed") - ErrCannotSign = nkeysError("nkeys: can not sign, no private key available") - ErrPublicKeyOnly = nkeysError("nkeys: no seed or private key available") - ErrIncompatibleKey = nkeysError("nkeys: incompatible key") - ErrInvalidChecksum = nkeysError("nkeys: invalid checksum") - ErrNoSeedFound = nkeysError("nkeys: no nkey seed found") - ErrInvalidNkeySeed = nkeysError("nkeys: doesn't contain a seed nkey") - ErrInvalidUserSeed = nkeysError("nkeys: doesn't contain an user seed nkey") - ErrInvalidRecipient = nkeysError("nkeys: not a valid recipient public curve key") - ErrInvalidSender = nkeysError("nkeys: not a valid sender public curve key") - ErrInvalidCurveKey = nkeysError("nkeys: not a valid curve key") - ErrInvalidCurveSeed = nkeysError("nkeys: not a valid curve seed") - ErrInvalidEncrypted = nkeysError("nkeys: encrypted input is not valid") - ErrInvalidEncVersion = nkeysError("nkeys: encrypted input wrong version") - ErrCouldNotDecrypt = nkeysError("nkeys: could not decrypt input") - ErrInvalidCurveKeyOperation = nkeysError("nkeys: curve key is not valid for sign/verify") - ErrInvalidNKeyOperation = nkeysError("nkeys: only curve key can seal/open") - ErrCannotOpen = nkeysError("nkeys: cannot open no private curve key available") - ErrCannotSeal = nkeysError("nkeys: cannot seal no private curve key available") -) - -type nkeysError string - -func (e nkeysError) Error() string { - return string(e) -} diff --git 
a/backend/services/controller/vendor/github.com/nats-io/nkeys/keypair.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/keypair.go deleted file mode 100644 index 9d05518..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/keypair.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2018-2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -import ( - "bytes" - "crypto/rand" - "io" - - "golang.org/x/crypto/ed25519" -) - -// kp is the internal struct for a kepypair using seed. -type kp struct { - seed []byte -} - -// All seeds are 32 bytes long. -const seedLen = 32 - -// CreatePair will create a KeyPair based on the rand entropy and a type/prefix byte. -func CreatePair(prefix PrefixByte) (KeyPair, error) { - return CreatePairWithRand(prefix, rand.Reader) -} - -// CreatePair will create a KeyPair based on the rand reader and a type/prefix byte. rand can be nil. -func CreatePairWithRand(prefix PrefixByte, rr io.Reader) (KeyPair, error) { - if prefix == PrefixByteCurve { - return CreateCurveKeysWithRand(rr) - } - if rr == nil { - rr = rand.Reader - } - var rawSeed [seedLen]byte - - _, err := io.ReadFull(rr, rawSeed[:]) - if err != nil { - return nil, err - } - - seed, err := EncodeSeed(prefix, rawSeed[:]) - if err != nil { - return nil, err - } - return &kp{seed}, nil -} - -// rawSeed will return the raw, decoded 64 byte seed. 
-func (pair *kp) rawSeed() ([]byte, error) { - _, raw, err := DecodeSeed(pair.seed) - return raw, err -} - -// keys will return a 32 byte public key and a 64 byte private key utilizing the seed. -func (pair *kp) keys() (ed25519.PublicKey, ed25519.PrivateKey, error) { - raw, err := pair.rawSeed() - if err != nil { - return nil, nil, err - } - return ed25519.GenerateKey(bytes.NewReader(raw)) -} - -// Wipe will randomize the contents of the seed key -func (pair *kp) Wipe() { - io.ReadFull(rand.Reader, pair.seed) - pair.seed = nil -} - -// Seed will return the encoded seed. -func (pair *kp) Seed() ([]byte, error) { - return pair.seed, nil -} - -// PublicKey will return the encoded public key associated with the KeyPair. -// All KeyPairs have a public key. -func (pair *kp) PublicKey() (string, error) { - public, raw, err := DecodeSeed(pair.seed) - if err != nil { - return "", err - } - pub, _, err := ed25519.GenerateKey(bytes.NewReader(raw)) - if err != nil { - return "", err - } - pk, err := Encode(public, pub) - if err != nil { - return "", err - } - return string(pk), nil -} - -// PrivateKey will return the encoded private key for KeyPair. -func (pair *kp) PrivateKey() ([]byte, error) { - _, priv, err := pair.keys() - if err != nil { - return nil, err - } - return Encode(PrefixBytePrivate, priv) -} - -// Sign will sign the input with KeyPair's private key. -func (pair *kp) Sign(input []byte) ([]byte, error) { - _, priv, err := pair.keys() - if err != nil { - return nil, err - } - return ed25519.Sign(priv, input), nil -} - -// Verify will verify the input against a signature utilizing the public key. 
-func (pair *kp) Verify(input []byte, sig []byte) error { - pub, _, err := pair.keys() - if err != nil { - return err - } - if !ed25519.Verify(pub, input, sig) { - return ErrInvalidSignature - } - return nil -} - -// Seal is only supported on CurveKeyPair -func (pair *kp) Seal(input []byte, recipient string) ([]byte, error) { - return nil, ErrInvalidNKeyOperation -} - -// SealWithRand is only supported on CurveKeyPair -func (pair *kp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) { - return nil, ErrInvalidNKeyOperation -} - -// Open is only supported on CurveKey -func (pair *kp) Open(input []byte, sender string) ([]byte, error) { - return nil, ErrInvalidNKeyOperation -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/nkeys.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/nkeys.go deleted file mode 100644 index 6f1ba20..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/nkeys.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2018-2019 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nkeys is an Ed25519 based public-key signature system that simplifies keys and seeds -// and performs signing and verification. -// It also supports encryption via x25519 keys and is compatible with https://pkg.go.dev/golang.org/x/crypto/nacl/box. 
-package nkeys - -import "io" - -// Version is our current version -const Version = "0.4.7" - -// KeyPair provides the central interface to nkeys. -type KeyPair interface { - Seed() ([]byte, error) - PublicKey() (string, error) - PrivateKey() ([]byte, error) - // Sign is only supported on Non CurveKeyPairs - Sign(input []byte) ([]byte, error) - // Verify is only supported on Non CurveKeyPairs - Verify(input []byte, sig []byte) error - Wipe() - // Seal is only supported on CurveKeyPair - Seal(input []byte, recipient string) ([]byte, error) - // SealWithRand is only supported on CurveKeyPair - SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) - // Open is only supported on CurveKey - Open(input []byte, sender string) ([]byte, error) -} - -// CreateUser will create a User typed KeyPair. -func CreateUser() (KeyPair, error) { - return CreatePair(PrefixByteUser) -} - -// CreateAccount will create an Account typed KeyPair. -func CreateAccount() (KeyPair, error) { - return CreatePair(PrefixByteAccount) -} - -// CreateServer will create a Server typed KeyPair. -func CreateServer() (KeyPair, error) { - return CreatePair(PrefixByteServer) -} - -// CreateCluster will create a Cluster typed KeyPair. -func CreateCluster() (KeyPair, error) { - return CreatePair(PrefixByteCluster) -} - -// CreateOperator will create an Operator typed KeyPair. -func CreateOperator() (KeyPair, error) { - return CreatePair(PrefixByteOperator) -} - -// FromPublicKey will create a KeyPair capable of verifying signatures. -func FromPublicKey(public string) (KeyPair, error) { - raw, err := decode([]byte(public)) - if err != nil { - return nil, err - } - pre := PrefixByte(raw[0]) - if err := checkValidPublicPrefixByte(pre); err != nil { - return nil, ErrInvalidPublicKey - } - return &pub{pre, raw[1:]}, nil -} - -// FromSeed will create a KeyPair capable of signing and verifying signatures. 
-func FromSeed(seed []byte) (KeyPair, error) { - prefix, _, err := DecodeSeed(seed) - if err != nil { - return nil, err - } - if prefix == PrefixByteCurve { - return FromCurveSeed(seed) - } - copy := append([]byte{}, seed...) - return &kp{copy}, nil -} - -// FromRawSeed will create a KeyPair from the raw 32 byte seed for a given type. -func FromRawSeed(prefix PrefixByte, rawSeed []byte) (KeyPair, error) { - seed, err := EncodeSeed(prefix, rawSeed) - if err != nil { - return nil, err - } - return &kp{seed}, nil -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/public.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/public.go deleted file mode 100644 index c3cd21e..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/public.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -import ( - "crypto/rand" - "io" - - "golang.org/x/crypto/ed25519" -) - -// A KeyPair from a public key capable of verifying only. -type pub struct { - pre PrefixByte - pub ed25519.PublicKey -} - -// PublicKey will return the encoded public key associated with the KeyPair. -// All KeyPairs have a public key. -func (p *pub) PublicKey() (string, error) { - pk, err := Encode(p.pre, p.pub) - if err != nil { - return "", err - } - return string(pk), nil -} - -// Seed will return an error since this is not available for public key only KeyPairs. 
-func (p *pub) Seed() ([]byte, error) { - return nil, ErrPublicKeyOnly -} - -// PrivateKey will return an error since this is not available for public key only KeyPairs. -func (p *pub) PrivateKey() ([]byte, error) { - return nil, ErrPublicKeyOnly -} - -// Sign will return an error since this is not available for public key only KeyPairs. -func (p *pub) Sign(input []byte) ([]byte, error) { - return nil, ErrCannotSign -} - -// Verify will verify the input against a signature utilizing the public key. -func (p *pub) Verify(input []byte, sig []byte) error { - if !ed25519.Verify(p.pub, input, sig) { - return ErrInvalidSignature - } - return nil -} - -// Wipe will randomize the public key and erase the pre byte. -func (p *pub) Wipe() { - p.pre = '0' - io.ReadFull(rand.Reader, p.pub) -} - -func (p *pub) Seal(input []byte, recipient string) ([]byte, error) { - if p.pre == PrefixByteCurve { - return nil, ErrCannotSeal - } - return nil, ErrInvalidNKeyOperation -} -func (p *pub) SealWithRand(input []byte, _recipient string, rr io.Reader) ([]byte, error) { - if p.pre == PrefixByteCurve { - return nil, ErrCannotSeal - } - return nil, ErrInvalidNKeyOperation -} - -func (p *pub) Open(input []byte, sender string) ([]byte, error) { - if p.pre == PrefixByteCurve { - return nil, ErrCannotOpen - } - return nil, ErrInvalidNKeyOperation -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/strkey.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/strkey.go deleted file mode 100644 index 8ae3311..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/strkey.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright 2018-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nkeys - -import ( - "bytes" - "encoding/base32" - "encoding/binary" -) - -// PrefixByte is a lead byte representing the type. -type PrefixByte byte - -const ( - // PrefixByteSeed is the version byte used for encoded NATS Seeds - PrefixByteSeed PrefixByte = 18 << 3 // Base32-encodes to 'S...' - - // PrefixBytePrivate is the version byte used for encoded NATS Private keys - PrefixBytePrivate PrefixByte = 15 << 3 // Base32-encodes to 'P...' - - // PrefixByteServer is the version byte used for encoded NATS Servers - PrefixByteServer PrefixByte = 13 << 3 // Base32-encodes to 'N...' - - // PrefixByteCluster is the version byte used for encoded NATS Clusters - PrefixByteCluster PrefixByte = 2 << 3 // Base32-encodes to 'C...' - - // PrefixByteOperator is the version byte used for encoded NATS Operators - PrefixByteOperator PrefixByte = 14 << 3 // Base32-encodes to 'O...' - - // PrefixByteAccount is the version byte used for encoded NATS Accounts - PrefixByteAccount PrefixByte = 0 // Base32-encodes to 'A...' - - // PrefixByteUser is the version byte used for encoded NATS Users - PrefixByteUser PrefixByte = 20 << 3 // Base32-encodes to 'U...' - - // PrefixByteCurve is the version byte used for encoded CurveKeys (X25519) - PrefixByteCurve PrefixByte = 23 << 3 // Base32-encodes to 'X...' - - // PrefixByteUnknown is for unknown prefixes. - PrefixByteUnknown PrefixByte = 25 << 3 // Base32-encodes to 'Z...' 
-) - -// Set our encoding to not include padding '==' -var b32Enc = base32.StdEncoding.WithPadding(base32.NoPadding) - -// Encode will encode a raw key or seed with the prefix and crc16 and then base32 encoded. -func Encode(prefix PrefixByte, src []byte) ([]byte, error) { - if err := checkValidPrefixByte(prefix); err != nil { - return nil, err - } - - var raw bytes.Buffer - - // write prefix byte - if err := raw.WriteByte(byte(prefix)); err != nil { - return nil, err - } - - // write payload - if _, err := raw.Write(src); err != nil { - return nil, err - } - - // Calculate and write crc16 checksum - err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) - if err != nil { - return nil, err - } - - data := raw.Bytes() - buf := make([]byte, b32Enc.EncodedLen(len(data))) - b32Enc.Encode(buf, data) - return buf[:], nil -} - -// EncodeSeed will encode a raw key with the prefix and then seed prefix and crc16 and then base32 encoded. -// `src` must be 32 bytes long (ed25519.SeedSize). -func EncodeSeed(public PrefixByte, src []byte) ([]byte, error) { - if err := checkValidPublicPrefixByte(public); err != nil { - return nil, err - } - - if len(src) != seedLen { - return nil, ErrInvalidSeedLen - } - - // In order to make this human printable for both bytes, we need to do a little - // bit manipulation to setup for base32 encoding which takes 5 bits at a time. - b1 := byte(PrefixByteSeed) | (byte(public) >> 5) - b2 := (byte(public) & 31) << 3 // 31 = 00011111 - - var raw bytes.Buffer - - raw.WriteByte(b1) - raw.WriteByte(b2) - - // write payload - if _, err := raw.Write(src); err != nil { - return nil, err - } - - // Calculate and write crc16 checksum - err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) - if err != nil { - return nil, err - } - - data := raw.Bytes() - buf := make([]byte, b32Enc.EncodedLen(len(data))) - b32Enc.Encode(buf, data) - return buf, nil -} - -// IsValidEncoding will tell you if the encoding is a valid key. 
-func IsValidEncoding(src []byte) bool { - _, err := decode(src) - return err == nil -} - -// decode will decode the base32 and check crc16 and the prefix for validity. -func decode(src []byte) ([]byte, error) { - raw := make([]byte, b32Enc.DecodedLen(len(src))) - n, err := b32Enc.Decode(raw, src) - if err != nil { - return nil, err - } - raw = raw[:n] - - if n < 4 { - return nil, ErrInvalidEncoding - } - - crc := binary.LittleEndian.Uint16(raw[n-2:]) - - // ensure checksum is valid - if err := validate(raw[0:n-2], crc); err != nil { - return nil, err - } - - return raw[:n-2], nil -} - -// Decode will decode the base32 string and check crc16 and enforce the prefix is what is expected. -func Decode(expectedPrefix PrefixByte, src []byte) ([]byte, error) { - if err := checkValidPrefixByte(expectedPrefix); err != nil { - return nil, err - } - raw, err := decode(src) - if err != nil { - return nil, err - } - b1 := raw[0] & 248 // 248 = 11111000 - if prefix := PrefixByte(b1); prefix != expectedPrefix { - return nil, ErrInvalidPrefixByte - } - return raw[1:], nil -} - -// DecodeSeed will decode the base32 string and check crc16 and enforce the prefix is a seed -// and the subsequent type is a valid type. -func DecodeSeed(src []byte) (PrefixByte, []byte, error) { - raw, err := decode(src) - if err != nil { - return PrefixByteSeed, nil, err - } - // Need to do the reverse here to get back to internal representation. 
- b1 := raw[0] & 248 // 248 = 11111000 - b2 := (raw[0]&7)<<5 | ((raw[1] & 248) >> 3) // 7 = 00000111 - - if PrefixByte(b1) != PrefixByteSeed { - return PrefixByteSeed, nil, ErrInvalidSeed - } - if checkValidPublicPrefixByte(PrefixByte(b2)) != nil { - return PrefixByteSeed, nil, ErrInvalidSeed - } - return PrefixByte(b2), raw[2:], nil -} - -// Prefix returns PrefixBytes of its input -func Prefix(src string) PrefixByte { - b, err := decode([]byte(src)) - if err != nil { - return PrefixByteUnknown - } - prefix := PrefixByte(b[0]) - err = checkValidPrefixByte(prefix) - if err == nil { - return prefix - } - // Might be a seed. - b1 := b[0] & 248 - if PrefixByte(b1) == PrefixByteSeed { - return PrefixByteSeed - } - return PrefixByteUnknown -} - -// IsValidPublicKey will decode and verify that the string is a valid encoded public key. -func IsValidPublicKey(src string) bool { - b, err := decode([]byte(src)) - if err != nil { - return false - } - if prefix := PrefixByte(b[0]); checkValidPublicPrefixByte(prefix) != nil { - return false - } - return true -} - -// IsValidPublicUserKey will decode and verify the string is a valid encoded Public User Key. -func IsValidPublicUserKey(src string) bool { - _, err := Decode(PrefixByteUser, []byte(src)) - return err == nil -} - -// IsValidPublicAccountKey will decode and verify the string is a valid encoded Public Account Key. -func IsValidPublicAccountKey(src string) bool { - _, err := Decode(PrefixByteAccount, []byte(src)) - return err == nil -} - -// IsValidPublicServerKey will decode and verify the string is a valid encoded Public Server Key. -func IsValidPublicServerKey(src string) bool { - _, err := Decode(PrefixByteServer, []byte(src)) - return err == nil -} - -// IsValidPublicClusterKey will decode and verify the string is a valid encoded Public Cluster Key. 
-func IsValidPublicClusterKey(src string) bool { - _, err := Decode(PrefixByteCluster, []byte(src)) - return err == nil -} - -// IsValidPublicOperatorKey will decode and verify the string is a valid encoded Public Operator Key. -func IsValidPublicOperatorKey(src string) bool { - _, err := Decode(PrefixByteOperator, []byte(src)) - return err == nil -} - -// IsValidPublicCurveKey will decode and verify the string is a valid encoded Public Curve Key. -func IsValidPublicCurveKey(src string) bool { - _, err := Decode(PrefixByteCurve, []byte(src)) - return err == nil -} - -// checkValidPrefixByte returns an error if the provided value -// is not one of the defined valid prefix byte constants. -func checkValidPrefixByte(prefix PrefixByte) error { - switch prefix { - case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, - PrefixByteAccount, PrefixByteUser, PrefixByteSeed, PrefixBytePrivate, PrefixByteCurve: - return nil - } - return ErrInvalidPrefixByte -} - -// checkValidPublicPrefixByte returns an error if the provided value -// is not one of the public defined valid prefix byte constants. 
-func checkValidPublicPrefixByte(prefix PrefixByte) error { - switch prefix { - case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, PrefixByteAccount, PrefixByteUser, PrefixByteCurve: - return nil - } - return ErrInvalidPrefixByte -} - -func (p PrefixByte) String() string { - switch p { - case PrefixByteOperator: - return "operator" - case PrefixByteServer: - return "server" - case PrefixByteCluster: - return "cluster" - case PrefixByteAccount: - return "account" - case PrefixByteUser: - return "user" - case PrefixByteSeed: - return "seed" - case PrefixBytePrivate: - return "private" - case PrefixByteCurve: - return "x25519" - } - return "unknown" -} - -// CompatibleKeyPair returns an error if the KeyPair doesn't match expected PrefixByte(s) -func CompatibleKeyPair(kp KeyPair, expected ...PrefixByte) error { - pk, err := kp.PublicKey() - if err != nil { - return err - } - pkType := Prefix(pk) - for _, k := range expected { - if pkType == k { - return nil - } - } - - return ErrIncompatibleKey -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nkeys/xkeys.go b/backend/services/controller/vendor/github.com/nats-io/nkeys/xkeys.go deleted file mode 100644 index 78f8b99..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nkeys/xkeys.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2022-2023 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nkeys - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "io" - - "golang.org/x/crypto/curve25519" - "golang.org/x/crypto/nacl/box" -) - -// This package will support safe use of X25519 keys for asymmetric encryption. -// We will be compatible with nacl.Box, but generate random nonces automatically. -// We may add more advanced options in the future for group recipients and better -// end to end algorithms. - -const ( - curveKeyLen = 32 - curveDecodeLen = 35 - curveNonceLen = 24 -) - -type ckp struct { - seed [curveKeyLen]byte // Private raw key. -} - -// CreateCurveKeys will create a Curve typed KeyPair. -func CreateCurveKeys() (KeyPair, error) { - return CreateCurveKeysWithRand(rand.Reader) -} - -// CreateCurveKeysWithRand will create a Curve typed KeyPair -// with specified rand source. -func CreateCurveKeysWithRand(rr io.Reader) (KeyPair, error) { - var kp ckp - _, err := io.ReadFull(rr, kp.seed[:]) - if err != nil { - return nil, err - } - return &kp, nil -} - -// Will create a curve key pair from seed. -func FromCurveSeed(seed []byte) (KeyPair, error) { - pb, raw, err := DecodeSeed(seed) - if err != nil { - return nil, err - } - if pb != PrefixByteCurve || len(raw) != curveKeyLen { - return nil, ErrInvalidCurveSeed - } - var kp ckp - copy(kp.seed[:], raw) - return &kp, nil -} - -// Seed will return the encoded seed. -func (pair *ckp) Seed() ([]byte, error) { - return EncodeSeed(PrefixByteCurve, pair.seed[:]) -} - -// PublicKey will return the encoded public key. -func (pair *ckp) PublicKey() (string, error) { - var pub [curveKeyLen]byte - curve25519.ScalarBaseMult(&pub, &pair.seed) - key, err := Encode(PrefixByteCurve, pub[:]) - return string(key), err -} - -// PrivateKey will return the encoded private key. 
-func (pair *ckp) PrivateKey() ([]byte, error) { - return Encode(PrefixBytePrivate, pair.seed[:]) -} - -func decodePubCurveKey(src string, dest []byte) error { - var raw [curveDecodeLen]byte // should always be 35 - n, err := b32Enc.Decode(raw[:], []byte(src)) - if err != nil { - return err - } - if n != curveDecodeLen { - return ErrInvalidCurveKey - } - // Make sure it is what we expected. - if prefix := PrefixByte(raw[0]); prefix != PrefixByteCurve { - return ErrInvalidPublicKey - } - var crc uint16 - end := n - 2 - sum := raw[end:n] - checksum := bytes.NewReader(sum) - if err := binary.Read(checksum, binary.LittleEndian, &crc); err != nil { - return err - } - - // ensure checksum is valid - if err := validate(raw[:end], crc); err != nil { - return err - } - - // Copy over, ignore prefix byte. - copy(dest, raw[1:end]) - return nil -} - -// Only version for now, but could add in X3DH in the future, etc. -const XKeyVersionV1 = "xkv1" -const vlen = len(XKeyVersionV1) - -// Seal is compatible with nacl.Box.Seal() and can be used in similar situations for small messages. -// We generate the nonce from crypto rand by default. 
-func (pair *ckp) Seal(input []byte, recipient string) ([]byte, error) { - return pair.SealWithRand(input, recipient, rand.Reader) -} - -func (pair *ckp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) { - var ( - rpub [curveKeyLen]byte - nonce [curveNonceLen]byte - out [vlen + curveNonceLen]byte - err error - ) - - if err = decodePubCurveKey(recipient, rpub[:]); err != nil { - return nil, ErrInvalidRecipient - } - if _, err := io.ReadFull(rr, nonce[:]); err != nil { - return nil, err - } - copy(out[:vlen], []byte(XKeyVersionV1)) - copy(out[vlen:], nonce[:]) - return box.Seal(out[:], input, &nonce, &rpub, &pair.seed), nil -} - -func (pair *ckp) Open(input []byte, sender string) ([]byte, error) { - if len(input) <= vlen+curveNonceLen { - return nil, ErrInvalidEncrypted - } - var ( - spub [curveKeyLen]byte - nonce [curveNonceLen]byte - err error - ) - if !bytes.Equal(input[:vlen], []byte(XKeyVersionV1)) { - return nil, ErrInvalidEncVersion - } - copy(nonce[:], input[vlen:vlen+curveNonceLen]) - - if err = decodePubCurveKey(sender, spub[:]); err != nil { - return nil, ErrInvalidSender - } - - decrypted, ok := box.Open(nil, input[vlen+curveNonceLen:], &nonce, &spub, &pair.seed) - if !ok { - return nil, ErrCouldNotDecrypt - } - return decrypted, nil -} - -// Wipe will randomize the contents of the secret key -func (pair *ckp) Wipe() { - io.ReadFull(rand.Reader, pair.seed[:]) -} - -func (pair *ckp) Sign(_ []byte) ([]byte, error) { - return nil, ErrInvalidCurveKeyOperation -} - -func (pair *ckp) Verify(_ []byte, _ []byte) error { - return ErrInvalidCurveKeyOperation -} diff --git a/backend/services/controller/vendor/github.com/nats-io/nuid/.gitignore b/backend/services/controller/vendor/github.com/nats-io/nuid/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nuid/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o 
-*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/backend/services/controller/vendor/github.com/nats-io/nuid/.travis.yml b/backend/services/controller/vendor/github.com/nats-io/nuid/.travis.yml deleted file mode 100644 index 52be726..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nuid/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go -sudo: false -go: -- 1.9.x -- 1.10.x - -install: -- go get -t ./... -- go get github.com/mattn/goveralls - -script: -- go fmt ./... -- go vet ./... -- go test -v -- go test -v --race -- go test -v -covermode=count -coverprofile=coverage.out -- $HOME/gopath/bin/goveralls -coverprofile coverage.out -service travis-ci diff --git a/backend/services/controller/vendor/github.com/nats-io/nuid/GOVERNANCE.md b/backend/services/controller/vendor/github.com/nats-io/nuid/GOVERNANCE.md deleted file mode 100644 index 01aee70..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nuid/GOVERNANCE.md +++ /dev/null @@ -1,3 +0,0 @@ -# NATS NUID Governance - -NATS NUID is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/nats-io/nuid/LICENSE b/backend/services/controller/vendor/github.com/nats-io/nuid/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nuid/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/backend/services/controller/vendor/github.com/nats-io/nuid/MAINTAINERS.md b/backend/services/controller/vendor/github.com/nats-io/nuid/MAINTAINERS.md deleted file mode 100644 index 6d0ed3e..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nuid/MAINTAINERS.md +++ /dev/null @@ -1,6 +0,0 @@ -# Maintainers - -Maintainership is on a per project basis. 
- -### Core-maintainers - - Derek Collison [@derekcollison](https://github.com/derekcollison) \ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/nats-io/nuid/README.md b/backend/services/controller/vendor/github.com/nats-io/nuid/README.md deleted file mode 100644 index 16e5394..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nuid/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# NUID - -[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) -[![ReportCard](http://goreportcard.com/badge/nats-io/nuid)](http://goreportcard.com/report/nats-io/nuid) -[![Build Status](https://travis-ci.org/nats-io/nuid.svg?branch=master)](http://travis-ci.org/nats-io/nuid) -[![Release](https://img.shields.io/badge/release-v1.0.1-1eb0fc.svg)](https://github.com/nats-io/nuid/releases/tag/v1.0.1) -[![GoDoc](http://godoc.org/github.com/nats-io/nuid?status.png)](http://godoc.org/github.com/nats-io/nuid) -[![Coverage Status](https://coveralls.io/repos/github/nats-io/nuid/badge.svg?branch=master)](https://coveralls.io/github/nats-io/nuid?branch=master) - -A highly performant unique identifier generator. - -## Installation - -Use the `go` command: - - $ go get github.com/nats-io/nuid - -## Basic Usage -```go - -// Utilize the global locked instance -nuid := nuid.Next() - -// Create an instance, these are not locked. -n := nuid.New() -nuid = n.Next() - -// Generate a new crypto/rand seeded prefix. -// Generally not needed, happens automatically. -n.RandomizePrefix() -``` - -## Performance -NUID needs to be very fast to generate and be truly unique, all while being entropy pool friendly. -NUID uses 12 bytes of crypto generated data (entropy draining), and 10 bytes of pseudo-random -sequential data that increments with a pseudo-random increment. - -Total length of a NUID string is 22 bytes of base 62 ascii text, so 62^22 or -2707803647802660400290261537185326956544 possibilities. 
- -NUID can generate identifiers as fast as 60ns, or ~16 million per second. There is an associated -benchmark you can use to test performance on your own hardware. - -## License - -Unless otherwise noted, the NATS source files are distributed -under the Apache Version 2.0 license found in the LICENSE file. diff --git a/backend/services/controller/vendor/github.com/nats-io/nuid/nuid.go b/backend/services/controller/vendor/github.com/nats-io/nuid/nuid.go deleted file mode 100644 index 8134c76..0000000 --- a/backend/services/controller/vendor/github.com/nats-io/nuid/nuid.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016-2019 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly. -package nuid - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "sync" - "time" - - prand "math/rand" -) - -// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly. -// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data -// that is started at a pseudo random number and increments with a pseudo-random increment. 
-// Total is 22 bytes of base 62 ascii text :) - -// Version of the library -const Version = "1.0.1" - -const ( - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - base = 62 - preLen = 12 - seqLen = 10 - maxSeq = int64(839299365868340224) // base^seqLen == 62^10 - minInc = int64(33) - maxInc = int64(333) - totalLen = preLen + seqLen -) - -type NUID struct { - pre []byte - seq int64 - inc int64 -} - -type lockedNUID struct { - sync.Mutex - *NUID -} - -// Global NUID -var globalNUID *lockedNUID - -// Seed sequential random with crypto or math/random and current time -// and generate crypto prefix. -func init() { - r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - prand.Seed(time.Now().UnixNano()) - } else { - prand.Seed(r.Int64()) - } - globalNUID = &lockedNUID{NUID: New()} - globalNUID.RandomizePrefix() -} - -// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment. -func New() *NUID { - n := &NUID{ - seq: prand.Int63n(maxSeq), - inc: minInc + prand.Int63n(maxInc-minInc), - pre: make([]byte, preLen), - } - n.RandomizePrefix() - return n -} - -// Generate the next NUID string from the global locked NUID instance. -func Next() string { - globalNUID.Lock() - nuid := globalNUID.Next() - globalNUID.Unlock() - return nuid -} - -// Generate the next NUID string. -func (n *NUID) Next() string { - // Increment and capture. - n.seq += n.inc - if n.seq >= maxSeq { - n.RandomizePrefix() - n.resetSequential() - } - seq := n.seq - - // Copy prefix - var b [totalLen]byte - bs := b[:preLen] - copy(bs, n.pre) - - // copy in the seq in base62. - for i, l := len(b), seq; i > preLen; l /= base { - i -= 1 - b[i] = digits[l%base] - } - return string(b[:]) -} - -// Resets the sequential portion of the NUID. -func (n *NUID) resetSequential() { - n.seq = prand.Int63n(maxSeq) - n.inc = minInc + prand.Int63n(maxInc-minInc) -} - -// Generate a new prefix from crypto/rand. 
-// This call *can* drain entropy and will be called automatically when we exhaust the sequential range. -// Will panic if it gets an error from rand.Int() -func (n *NUID) RandomizePrefix() { - var cb [preLen]byte - cbs := cb[:] - if nb, err := rand.Read(cbs); nb != preLen || err != nil { - panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err)) - } - - for i := 0; i < preLen; i++ { - n.pre[i] = digits[int(cbs[i])%base] - } -} diff --git a/backend/services/controller/vendor/github.com/pkg/errors/.gitignore b/backend/services/controller/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/backend/services/controller/vendor/github.com/pkg/errors/.travis.yml b/backend/services/controller/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de0..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/backend/services/controller/vendor/github.com/pkg/errors/LICENSE b/backend/services/controller/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/backend/services/controller/vendor/github.com/pkg/errors/Makefile b/backend/services/controller/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cd..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... 
- -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/backend/services/controller/vendor/github.com/pkg/errors/README.md b/backend/services/controller/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. 
Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. 
- -## License - -BSD-2-Clause diff --git a/backend/services/controller/vendor/github.com/pkg/errors/appveyor.yml b/backend/services/controller/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932ead..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/backend/services/controller/vendor/github.com/pkg/errors/errors.go b/backend/services/controller/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. 
For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. 
This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. 
-func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. 
-func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/backend/services/controller/vendor/github.com/pkg/errors/go113.go b/backend/services/controller/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. 
-// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. -// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/backend/services/controller/vendor/github.com/pkg/errors/stack.go b/backend/services/controller/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a834..0000000 --- a/backend/services/controller/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. 
-func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). 
-type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). 
-func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/backend/services/controller/vendor/github.com/rs/cors/LICENSE b/backend/services/controller/vendor/github.com/rs/cors/LICENSE deleted file mode 100644 index d8e2df5..0000000 --- a/backend/services/controller/vendor/github.com/rs/cors/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/backend/services/controller/vendor/github.com/rs/cors/README.md b/backend/services/controller/vendor/github.com/rs/cors/README.md deleted file mode 100644 index 0ad3e94..0000000 --- a/backend/services/controller/vendor/github.com/rs/cors/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Go CORS handler [![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/cors) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/cors/master/LICENSE) [![build](https://img.shields.io/travis/rs/cors.svg?style=flat)](https://travis-ci.org/rs/cors) [![Coverage](http://gocover.io/_badge/github.com/rs/cors)](http://gocover.io/github.com/rs/cors) - -CORS is a `net/http` handler implementing [Cross Origin Resource Sharing W3 specification](http://www.w3.org/TR/cors/) in Golang. - -## Getting Started - -After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`. - -```go -package main - -import ( - "net/http" - - "github.com/rs/cors" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - // cors.Default() setup the middleware with default options being - // all origins accepted with simple methods (GET, POST). See - // documentation below for more options. 
- handler := cors.Default().Handler(mux) - http.ListenAndServe(":8080", handler) -} -``` - -Install `cors`: - - go get github.com/rs/cors - -Then run your server: - - go run server.go - -The server now runs on `localhost:8080`: - - $ curl -D - -H 'Origin: http://foo.com' http://localhost:8080/ - HTTP/1.1 200 OK - Access-Control-Allow-Origin: foo.com - Content-Type: application/json - Date: Sat, 25 Oct 2014 03:43:57 GMT - Content-Length: 18 - - {"hello": "world"} - -### Allow * With Credentials Security Protection - -This library has been modified to avoid a well known security issue when configured with `AllowedOrigins` to `*` and `AllowCredentials` to `true`. Such setup used to make the library reflects the request `Origin` header value, working around a security protection embedded into the standard that makes clients to refuse such configuration. This behavior has been removed with [#55](https://github.com/rs/cors/issues/55) and [#57](https://github.com/rs/cors/issues/57). - -If you depend on this behavior and understand the implications, you can restore it using the `AllowOriginFunc` with `func(origin string) {return true}`. - -Please refer to [#55](https://github.com/rs/cors/issues/55) for more information about the security implications. 
- -### More Examples - -* `net/http`: [examples/nethttp/server.go](https://github.com/rs/cors/blob/master/examples/nethttp/server.go) -* [Goji](https://goji.io): [examples/goji/server.go](https://github.com/rs/cors/blob/master/examples/goji/server.go) -* [Martini](http://martini.codegangsta.io): [examples/martini/server.go](https://github.com/rs/cors/blob/master/examples/martini/server.go) -* [Negroni](https://github.com/codegangsta/negroni): [examples/negroni/server.go](https://github.com/rs/cors/blob/master/examples/negroni/server.go) -* [Alice](https://github.com/justinas/alice): [examples/alice/server.go](https://github.com/rs/cors/blob/master/examples/alice/server.go) -* [HttpRouter](https://github.com/julienschmidt/httprouter): [examples/httprouter/server.go](https://github.com/rs/cors/blob/master/examples/httprouter/server.go) -* [Gorilla](http://www.gorillatoolkit.org/pkg/mux): [examples/gorilla/server.go](https://github.com/rs/cors/blob/master/examples/gorilla/server.go) -* [Buffalo](https://gobuffalo.io): [examples/buffalo/server.go](https://github.com/rs/cors/blob/master/examples/buffalo/server.go) -* [Gin](https://gin-gonic.github.io/gin): [examples/gin/server.go](https://github.com/rs/cors/blob/master/examples/gin/server.go) -* [Chi](https://github.com/go-chi/chi): [examples/chi/server.go](https://github.com/rs/cors/blob/master/examples/chi/server.go) - -## Parameters - -Parameters are passed to the middleware thru the `cors.New` method as follow: - -```go -c := cors.New(cors.Options{ - AllowedOrigins: []string{"http://foo.com", "http://foo.com:8080"}, - AllowCredentials: true, - // Enable Debugging for testing, consider disabling in production - Debug: true, -}) - -// Insert the middleware -handler = c.Handler(handler) -``` - -* **AllowedOrigins** `[]string`: A list of origins a cross-domain request can be executed from. If the special `*` value is present in the list, all origins will be allowed. 
An origin may contain a wildcard (`*`) to replace 0 or more characters (i.e.: `http://*.domain.com`). Usage of wildcards implies a small performance penality. Only one wildcard can be used per origin. The default value is `*`. -* **AllowOriginFunc** `func (origin string) bool`: A custom function to validate the origin. It takes the origin as an argument and returns true if allowed, or false otherwise. If this option is set, the content of `AllowedOrigins` is ignored. -* **AllowOriginRequestFunc** `func (r *http.Request, origin string) bool`: A custom function to validate the origin. It takes the HTTP Request object and the origin as argument and returns true if allowed or false otherwise. If this option is set, the content of `AllowedOrigins` and `AllowOriginFunc` is ignored -* **AllowedMethods** `[]string`: A list of methods the client is allowed to use with cross-domain requests. Default value is simple methods (`GET` and `POST`). -* **AllowedHeaders** `[]string`: A list of non simple headers the client is allowed to use with cross-domain requests. -* **ExposedHeaders** `[]string`: Indicates which headers are safe to expose to the API of a CORS API specification -* **AllowCredentials** `bool`: Indicates whether the request can include user credentials like cookies, HTTP authentication or client side SSL certificates. The default is `false`. -* **MaxAge** `int`: Indicates how long (in seconds) the results of a preflight request can be cached. The default is `0` which stands for no max age. -* **OptionsPassthrough** `bool`: Instructs preflight to let other potential next handlers to process the `OPTIONS` method. Turn this on if your application handles `OPTIONS`. -* **OptionsSuccessStatus** `int`: Provides a status code to use for successful OPTIONS requests. Default value is `http.StatusNoContent` (`204`). -* **Debug** `bool`: Debugging flag adds additional output to debug server side CORS issues. 
- -See [API documentation](http://godoc.org/github.com/rs/cors) for more info. - -## Benchmarks - - BenchmarkWithout 20000000 64.6 ns/op 8 B/op 1 allocs/op - BenchmarkDefault 3000000 469 ns/op 114 B/op 2 allocs/op - BenchmarkAllowedOrigin 3000000 608 ns/op 114 B/op 2 allocs/op - BenchmarkPreflight 20000000 73.2 ns/op 0 B/op 0 allocs/op - BenchmarkPreflightHeader 20000000 73.6 ns/op 0 B/op 0 allocs/op - BenchmarkParseHeaderList 2000000 847 ns/op 184 B/op 6 allocs/op - BenchmarkParse…Single 5000000 290 ns/op 32 B/op 3 allocs/op - BenchmarkParse…Normalized 2000000 776 ns/op 160 B/op 6 allocs/op - -## Licenses - -All source code is licensed under the [MIT License](https://raw.github.com/rs/cors/master/LICENSE). diff --git a/backend/services/controller/vendor/github.com/rs/cors/cors.go b/backend/services/controller/vendor/github.com/rs/cors/cors.go deleted file mode 100644 index 5669a67..0000000 --- a/backend/services/controller/vendor/github.com/rs/cors/cors.go +++ /dev/null @@ -1,462 +0,0 @@ -/* -Package cors is net/http handler to handle CORS related requests -as defined by http://www.w3.org/TR/cors/ - -You can configure it by passing an option struct to cors.New: - - c := cors.New(cors.Options{ - AllowedOrigins: []string{"foo.com"}, - AllowedMethods: []string{http.MethodGet, http.MethodPost, http.MethodDelete}, - AllowCredentials: true, - }) - -Then insert the handler in the chain: - - handler = c.Handler(handler) - -See Options documentation for more options. - -The resulting handler is a standard net/http handler. -*/ -package cors - -import ( - "log" - "net/http" - "os" - "strconv" - "strings" -) - -// Options is a configuration container to setup the CORS middleware. -type Options struct { - // AllowedOrigins is a list of origins a cross-domain request can be executed from. - // If the special "*" value is present in the list, all origins will be allowed. - // An origin may contain a wildcard (*) to replace 0 or more characters - // (i.e.: http://*.domain.com). 
Usage of wildcards implies a small performance penalty. - // Only one wildcard can be used per origin. - // Default value is ["*"] - AllowedOrigins []string - // AllowOriginFunc is a custom function to validate the origin. It take the origin - // as argument and returns true if allowed or false otherwise. If this option is - // set, the content of AllowedOrigins is ignored. - AllowOriginFunc func(origin string) bool - // AllowOriginRequestFunc is a custom function to validate the origin. It takes the HTTP Request object and the origin as - // argument and returns true if allowed or false otherwise. If this option is set, the content of `AllowedOrigins` - // and `AllowOriginFunc` is ignored. - AllowOriginRequestFunc func(r *http.Request, origin string) bool - // AllowedMethods is a list of methods the client is allowed to use with - // cross-domain requests. Default value is simple methods (HEAD, GET and POST). - AllowedMethods []string - // AllowedHeaders is list of non simple headers the client is allowed to use with - // cross-domain requests. - // If the special "*" value is present in the list, all headers will be allowed. - // Default value is [] but "Origin" is always appended to the list. - AllowedHeaders []string - // ExposedHeaders indicates which headers are safe to expose to the API of a CORS - // API specification - ExposedHeaders []string - // MaxAge indicates how long (in seconds) the results of a preflight request - // can be cached - MaxAge int - // AllowCredentials indicates whether the request can include user credentials like - // cookies, HTTP authentication or client side SSL certificates. - AllowCredentials bool - // AllowPrivateNetwork indicates whether to accept cross-origin requests over a - // private network. - AllowPrivateNetwork bool - // OptionsPassthrough instructs preflight to let other potential next handlers to - // process the OPTIONS method. Turn this on if your application handles OPTIONS. 
- OptionsPassthrough bool - // Provides a status code to use for successful OPTIONS requests. - // Default value is http.StatusNoContent (204). - OptionsSuccessStatus int - // Debugging flag adds additional output to debug server side CORS issues - Debug bool -} - -// Logger generic interface for logger -type Logger interface { - Printf(string, ...interface{}) -} - -// Cors http handler -type Cors struct { - // Debug logger - Log Logger - // Normalized list of plain allowed origins - allowedOrigins []string - // List of allowed origins containing wildcards - allowedWOrigins []wildcard - // Optional origin validator function - allowOriginFunc func(origin string) bool - // Optional origin validator (with request) function - allowOriginRequestFunc func(r *http.Request, origin string) bool - // Normalized list of allowed headers - allowedHeaders []string - // Normalized list of allowed methods - allowedMethods []string - // Normalized list of exposed headers - exposedHeaders []string - maxAge int - // Set to true when allowed origins contains a "*" - allowedOriginsAll bool - // Set to true when allowed headers contains a "*" - allowedHeadersAll bool - // Status code to use for successful OPTIONS requests - optionsSuccessStatus int - allowCredentials bool - allowPrivateNetwork bool - optionPassthrough bool -} - -// New creates a new Cors handler with the provided options. 
-func New(options Options) *Cors { - c := &Cors{ - exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey), - allowOriginFunc: options.AllowOriginFunc, - allowOriginRequestFunc: options.AllowOriginRequestFunc, - allowCredentials: options.AllowCredentials, - allowPrivateNetwork: options.AllowPrivateNetwork, - maxAge: options.MaxAge, - optionPassthrough: options.OptionsPassthrough, - } - if options.Debug && c.Log == nil { - c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags) - } - - // Normalize options - // Note: for origins and methods matching, the spec requires a case-sensitive matching. - // As it may error prone, we chose to ignore the spec here. - - // Allowed Origins - if len(options.AllowedOrigins) == 0 { - if options.AllowOriginFunc == nil && options.AllowOriginRequestFunc == nil { - // Default is all origins - c.allowedOriginsAll = true - } - } else { - c.allowedOrigins = []string{} - c.allowedWOrigins = []wildcard{} - for _, origin := range options.AllowedOrigins { - // Normalize - origin = strings.ToLower(origin) - if origin == "*" { - // If "*" is present in the list, turn the whole list into a match all - c.allowedOriginsAll = true - c.allowedOrigins = nil - c.allowedWOrigins = nil - break - } else if i := strings.IndexByte(origin, '*'); i >= 0 { - // Split the origin in two: start and end string without the * - w := wildcard{origin[0:i], origin[i+1:]} - c.allowedWOrigins = append(c.allowedWOrigins, w) - } else { - c.allowedOrigins = append(c.allowedOrigins, origin) - } - } - } - - // Allowed Headers - if len(options.AllowedHeaders) == 0 { - // Use sensible defaults - c.allowedHeaders = []string{"Origin", "Accept", "Content-Type", "X-Requested-With"} - } else { - // Origin is always appended as some browsers will always request for this header at preflight - c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey) - for _, h := range options.AllowedHeaders { - if h == "*" { - c.allowedHeadersAll = 
true - c.allowedHeaders = nil - break - } - } - } - - // Allowed Methods - if len(options.AllowedMethods) == 0 { - // Default is spec's "simple" methods - c.allowedMethods = []string{http.MethodGet, http.MethodPost, http.MethodHead} - } else { - c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper) - } - - // Options Success Status Code - if options.OptionsSuccessStatus == 0 { - c.optionsSuccessStatus = http.StatusNoContent - } else { - c.optionsSuccessStatus = options.OptionsSuccessStatus - } - - return c -} - -// Default creates a new Cors handler with default options. -func Default() *Cors { - return New(Options{}) -} - -// AllowAll create a new Cors handler with permissive configuration allowing all -// origins with all standard methods with any header and credentials. -func AllowAll() *Cors { - return New(Options{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{ - http.MethodHead, - http.MethodGet, - http.MethodPost, - http.MethodPut, - http.MethodPatch, - http.MethodDelete, - }, - AllowedHeaders: []string{"*"}, - AllowCredentials: false, - }) -} - -// Handler apply the CORS specification on the request, and add relevant CORS headers -// as necessary. -func (c *Cors) Handler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" { - c.logf("Handler: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. 
One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - if c.optionPassthrough { - h.ServeHTTP(w, r) - } else { - w.WriteHeader(c.optionsSuccessStatus) - } - } else { - c.logf("Handler: Actual request") - c.handleActualRequest(w, r) - h.ServeHTTP(w, r) - } - }) -} - -// HandlerFunc provides Martini compatible handler -func (c *Cors) HandlerFunc(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" { - c.logf("HandlerFunc: Preflight request") - c.handlePreflight(w, r) - - w.WriteHeader(c.optionsSuccessStatus) - } else { - c.logf("HandlerFunc: Actual request") - c.handleActualRequest(w, r) - } -} - -// Negroni compatible interface -func (c *Cors) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" { - c.logf("ServeHTTP: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. 
One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - if c.optionPassthrough { - next(w, r) - } else { - w.WriteHeader(c.optionsSuccessStatus) - } - } else { - c.logf("ServeHTTP: Actual request") - c.handleActualRequest(w, r) - next(w, r) - } -} - -// handlePreflight handles pre-flight CORS requests -func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - if r.Method != http.MethodOptions { - c.logf(" Preflight aborted: %s!=OPTIONS", r.Method) - return - } - // Always set Vary headers - // see https://github.com/rs/cors/issues/10, - // https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001 - headers.Add("Vary", "Origin") - headers.Add("Vary", "Access-Control-Request-Method") - headers.Add("Vary", "Access-Control-Request-Headers") - if c.allowPrivateNetwork { - headers.Add("Vary", "Access-Control-Request-Private-Network") - } - - if origin == "" { - c.logf(" Preflight aborted: empty origin") - return - } - if !c.isOriginAllowed(r, origin) { - c.logf(" Preflight aborted: origin '%s' not allowed", origin) - return - } - - reqMethod := r.Header.Get("Access-Control-Request-Method") - if !c.isMethodAllowed(reqMethod) { - c.logf(" Preflight aborted: method '%s' not allowed", reqMethod) - return - } - // Amazon API Gateway is sometimes feeding multiple values for - // Access-Control-Request-Headers in a way where r.Header.Values() picks - // them all up, but r.Header.Get() does not. 
- // I suspect it is something like this: https://stackoverflow.com/a/4371395 - reqHeaderList := strings.Join(r.Header.Values("Access-Control-Request-Headers"), ",") - reqHeaders := parseHeaderList(reqHeaderList) - if !c.areHeadersAllowed(reqHeaders) { - c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders) - return - } - if c.allowedOriginsAll { - headers.Set("Access-Control-Allow-Origin", "*") - } else { - headers.Set("Access-Control-Allow-Origin", origin) - } - // Spec says: Since the list of methods can be unbounded, simply returning the method indicated - // by Access-Control-Request-Method (if supported) can be enough - headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod)) - if len(reqHeaders) > 0 { - - // Spec says: Since the list of headers can be unbounded, simply returning supported headers - // from Access-Control-Request-Headers can be enough - headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - if c.allowPrivateNetwork && r.Header.Get("Access-Control-Request-Private-Network") == "true" { - headers.Set("Access-Control-Allow-Private-Network", "true") - } - if c.maxAge > 0 { - headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge)) - } - c.logf(" Preflight response headers: %v", headers) -} - -// handleActualRequest handles simple cross-origin requests, actual request or redirects -func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - // Always set Vary, see https://github.com/rs/cors/issues/10 - headers.Add("Vary", "Origin") - if origin == "" { - c.logf(" Actual request no headers added: missing origin") - return - } - if !c.isOriginAllowed(r, origin) { - c.logf(" Actual request no headers added: origin '%s' not allowed", origin) - return - } - - // Note that spec does define a way to specifically disallow a simple 
method like GET or - // POST. Access-Control-Allow-Methods is only used for pre-flight requests and the - // spec doesn't instruct to check the allowed methods for simple cross-origin requests. - // We think it's a nice feature to be able to have control on those methods though. - if !c.isMethodAllowed(r.Method) { - c.logf(" Actual request no headers added: method '%s' not allowed", r.Method) - - return - } - if c.allowedOriginsAll { - headers.Set("Access-Control-Allow-Origin", "*") - } else { - headers.Set("Access-Control-Allow-Origin", origin) - } - if len(c.exposedHeaders) > 0 { - headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - c.logf(" Actual response added headers: %v", headers) -} - -// convenience method. checks if a logger is set. -func (c *Cors) logf(format string, a ...interface{}) { - if c.Log != nil { - c.Log.Printf(format, a...) - } -} - -// check the Origin of a request. No origin at all is also allowed. 
-func (c *Cors) OriginAllowed(r *http.Request) bool { - origin := r.Header.Get("Origin") - return c.isOriginAllowed(r, origin) -} - -// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests -// on the endpoint -func (c *Cors) isOriginAllowed(r *http.Request, origin string) bool { - if c.allowOriginRequestFunc != nil { - return c.allowOriginRequestFunc(r, origin) - } - if c.allowOriginFunc != nil { - return c.allowOriginFunc(origin) - } - if c.allowedOriginsAll { - return true - } - origin = strings.ToLower(origin) - for _, o := range c.allowedOrigins { - if o == origin { - return true - } - } - for _, w := range c.allowedWOrigins { - if w.match(origin) { - return true - } - } - return false -} - -// isMethodAllowed checks if a given method can be used as part of a cross-domain request -// on the endpoint -func (c *Cors) isMethodAllowed(method string) bool { - if len(c.allowedMethods) == 0 { - // If no method allowed, always return false, even for preflight request - return false - } - method = strings.ToUpper(method) - if method == http.MethodOptions { - // Always allow preflight requests - return true - } - for _, m := range c.allowedMethods { - if m == method { - return true - } - } - return false -} - -// areHeadersAllowed checks if a given list of headers are allowed to used within -// a cross-domain request. 
-func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool { - if c.allowedHeadersAll || len(requestedHeaders) == 0 { - return true - } - for _, header := range requestedHeaders { - header = http.CanonicalHeaderKey(header) - found := false - for _, h := range c.allowedHeaders { - if h == header { - found = true - break - } - } - if !found { - return false - } - } - return true -} diff --git a/backend/services/controller/vendor/github.com/rs/cors/utils.go b/backend/services/controller/vendor/github.com/rs/cors/utils.go deleted file mode 100644 index 6bb120c..0000000 --- a/backend/services/controller/vendor/github.com/rs/cors/utils.go +++ /dev/null @@ -1,71 +0,0 @@ -package cors - -import "strings" - -const toLower = 'a' - 'A' - -type converter func(string) string - -type wildcard struct { - prefix string - suffix string -} - -func (w wildcard) match(s string) bool { - return len(s) >= len(w.prefix)+len(w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix) -} - -// convert converts a list of string using the passed converter function -func convert(s []string, c converter) []string { - out := []string{} - for _, i := range s { - out = append(out, c(i)) - } - return out -} - -// parseHeaderList tokenize + normalize a string containing a list of headers -func parseHeaderList(headerList string) []string { - l := len(headerList) - h := make([]byte, 0, l) - upper := true - // Estimate the number headers in order to allocate the right splice size - t := 0 - for i := 0; i < l; i++ { - if headerList[i] == ',' { - t++ - } - } - headers := make([]string, 0, t) - for i := 0; i < l; i++ { - b := headerList[i] - switch { - case b >= 'a' && b <= 'z': - if upper { - h = append(h, b-toLower) - } else { - h = append(h, b) - } - case b >= 'A' && b <= 'Z': - if !upper { - h = append(h, b+toLower) - } else { - h = append(h, b) - } - case b == '-' || b == '_' || b == '.' 
|| (b >= '0' && b <= '9'): - h = append(h, b) - } - - if b == ' ' || b == ',' || i == l-1 { - if len(h) > 0 { - // Flush the found header - headers = append(headers, string(h)) - h = h[:0] - upper = true - } - } else { - upper = b == '-' || b == '_' - } - } - return headers -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/.gitignore b/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/.gitignore deleted file mode 100644 index f1c181e..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out diff --git a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/LICENSE b/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/README.md b/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/README.md deleted file mode 100644 index d2824e4..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/README.md +++ /dev/null @@ -1,17 +0,0 @@ -[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/pbkdf2.svg)](https://pkg.go.dev/github.com/xdg-go/pbkdf2) -[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/pbkdf2)](https://goreportcard.com/report/github.com/xdg-go/pbkdf2) -[![Github Actions](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml) - -# pbkdf2 – Go implementation of PBKDF2 - -## Description - -Package pbkdf2 provides password-based key derivation based on -[RFC 8018](https://tools.ietf.org/html/rfc8018). - -## Copyright and License - -Copyright 2021 by David A. Golden. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"). You may -obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go b/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go deleted file mode 100644 index 029945c..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2021 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package pbkdf2 implements password-based key derivation using the PBKDF2 -// algorithm described in RFC 2898 and RFC 8018. 
-// -// It provides a drop-in replacement for `golang.org/x/crypto/pbkdf2`, with -// the following benefits: -// -// - Released as a module with semantic versioning -// -// - Does not pull in dependencies for unrelated `x/crypto/*` packages -// -// - Supports Go 1.9+ -// -// See https://tools.ietf.org/html/rfc8018#section-4 for security considerations -// in the selection of a salt and iteration count. -package pbkdf2 - -import ( - "crypto/hmac" - "encoding/binary" - "hash" -) - -// Key generates a derived key from a password using the PBKDF2 algorithm. The -// inputs include salt bytes, the iteration count, desired key length, and a -// constructor for a hashing function. For example, for a 32-byte key using -// SHA-256: -// -// key := Key([]byte("trustNo1"), salt, 10000, 32, sha256.New) -func Key(password, salt []byte, iterCount, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hLen := prf.Size() - numBlocks := keyLen / hLen - // Get an extra block if keyLen is not an even number of hLen blocks. - if keyLen%hLen > 0 { - numBlocks++ - } - - Ti := make([]byte, hLen) - Uj := make([]byte, hLen) - dk := make([]byte, 0, hLen*numBlocks) - buf := make([]byte, 4) - - for i := uint32(1); i <= uint32(numBlocks); i++ { - // Initialize Uj for j == 1 from salt and block index. - // Initialize Ti = U1. - binary.BigEndian.PutUint32(buf, i) - prf.Reset() - prf.Write(salt) - prf.Write(buf) - Uj = Uj[:0] - Uj = prf.Sum(Uj) - - // Ti = U1 ^ U2 ^ ... ^ Ux - copy(Ti, Uj) - for j := 2; j <= iterCount; j++ { - prf.Reset() - prf.Write(Uj) - Uj = Uj[:0] - Uj = prf.Sum(Uj) - for k := range Uj { - Ti[k] ^= Uj[k] - } - } - - // DK = concat(T1, T2, ... Tn) - dk = append(dk, Ti...) 
- } - - return dk[0:keyLen] -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/.gitignore b/backend/services/controller/vendor/github.com/xdg-go/scram/.gitignore deleted file mode 100644 index e69de29..0000000 diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/CHANGELOG.md b/backend/services/controller/vendor/github.com/xdg-go/scram/CHANGELOG.md deleted file mode 100644 index 21828db..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/CHANGELOG.md +++ /dev/null @@ -1,22 +0,0 @@ -# CHANGELOG - -## v1.1.1 - 2022-03-03 - -- Bump stringprep dependency to v1.0.3 for upstream CVE fix. - -## v1.1.0 - 2022-01-16 - -- Add SHA-512 hash generator function for convenience. - -## v1.0.2 - 2021-03-28 - -- Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to - minimize transitive dependencies and support Go 1.9+. - -## v1.0.1 - 2021-03-27 - -- Bump stringprep dependency to v1.0.2 for Go 1.11 support. - -## v1.0.0 - 2021-03-27 - -- First release as a Go module diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/LICENSE b/backend/services/controller/vendor/github.com/xdg-go/scram/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/README.md b/backend/services/controller/vendor/github.com/xdg-go/scram/README.md deleted file mode 100644 index 3a46f5c..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/README.md +++ /dev/null @@ -1,72 +0,0 @@ -[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/scram.svg)](https://pkg.go.dev/github.com/xdg-go/scram) -[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/scram)](https://goreportcard.com/report/github.com/xdg-go/scram) -[![Github Actions](https://github.com/xdg-go/scram/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/scram/actions/workflows/test.yml) - -# scram – Go implementation of RFC-5802 - -## Description - -Package scram provides client and server implementations of the Salted -Challenge Response Authentication Mechanism (SCRAM) described in -[RFC-5802](https://tools.ietf.org/html/rfc5802) and -[RFC-7677](https://tools.ietf.org/html/rfc7677). - -It includes both client and server side support. - -Channel binding and extensions are not (yet) supported. - -## Examples - -### Client side - - package main - - import "github.com/xdg-go/scram" - - func main() { - // Get Client with username, password and (optional) authorization ID. - clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "") - if err != nil { - panic(err) - } - - // Prepare the authentication conversation. Use the empty string as the - // initial server message argument to start the conversation. - conv := clientSHA1.NewConversation() - var serverMsg string - - // Get the first message, send it and read the response. - firstMsg, err := conv.Step(serverMsg) - if err != nil { - panic(err) - } - serverMsg = sendClientMsg(firstMsg) - - // Get the second message, send it, and read the response. 
- secondMsg, err := conv.Step(serverMsg) - if err != nil { - panic(err) - } - serverMsg = sendClientMsg(secondMsg) - - // Validate the server's final message. We have no further message to - // send so ignore that return value. - _, err = conv.Step(serverMsg) - if err != nil { - panic(err) - } - - return - } - - func sendClientMsg(s string) string { - // A real implementation would send this to a server and read a reply. - return "" - } - -## Copyright and License - -Copyright 2018 by David A. Golden. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"). You may -obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/client.go b/backend/services/controller/vendor/github.com/xdg-go/scram/client.go deleted file mode 100644 index 5b53021..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/client.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "sync" - - "github.com/xdg-go/pbkdf2" -) - -// Client implements the client side of SCRAM authentication. It holds -// configuration values needed to initialize new client-side conversations for -// a specific username, password and authorization ID tuple. Client caches -// the computationally-expensive parts of a SCRAM conversation as described in -// RFC-5802. If repeated authentication conversations may be required for a -// user (e.g. disconnect/reconnect), the user's Client should be preserved. -// -// For security reasons, Clients have a default minimum PBKDF2 iteration count -// of 4096. 
If a server requests a smaller iteration count, an authentication -// conversation will error. -// -// A Client can also be used by a server application to construct the hashed -// authentication values to be stored for a new user. See StoredCredentials() -// for more. -type Client struct { - sync.RWMutex - username string - password string - authzID string - minIters int - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn - cache map[KeyFactors]derivedKeys -} - -func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client { - return &Client{ - username: username, - password: password, - authzID: authzID, - minIters: 4096, - nonceGen: defaultNonceGenerator, - hashGen: fcn, - cache: make(map[KeyFactors]derivedKeys), - } -} - -// WithMinIterations changes minimum required PBKDF2 iteration count. -func (c *Client) WithMinIterations(n int) *Client { - c.Lock() - defer c.Unlock() - c.minIters = n - return c -} - -// WithNonceGenerator replaces the default nonce generator (base64 encoding of -// 24 bytes from crypto/rand) with a custom generator. This is provided for -// testing or for users with custom nonce requirements. -func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client { - c.Lock() - defer c.Unlock() - c.nonceGen = ng - return c -} - -// NewConversation constructs a client-side authentication conversation. -// Conversations cannot be reused, so this must be called for each new -// authentication attempt. 
-func (c *Client) NewConversation() *ClientConversation { - c.RLock() - defer c.RUnlock() - return &ClientConversation{ - client: c, - nonceGen: c.nonceGen, - hashGen: c.hashGen, - minIters: c.minIters, - } -} - -func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys { - dk, ok := c.getCache(kf) - if !ok { - dk = c.computeKeys(kf) - c.setCache(kf, dk) - } - return dk -} - -// GetStoredCredentials takes a salt and iteration count structure and -// provides the values that must be stored by a server to authentication a -// user. These values are what the Server credential lookup function must -// return for a given username. -func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials { - dk := c.getDerivedKeys(kf) - return StoredCredentials{ - KeyFactors: kf, - StoredKey: dk.StoredKey, - ServerKey: dk.ServerKey, - } -} - -func (c *Client) computeKeys(kf KeyFactors) derivedKeys { - h := c.hashGen() - saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen) - clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key")) - - return derivedKeys{ - ClientKey: clientKey, - StoredKey: computeHash(c.hashGen, clientKey), - ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")), - } -} - -func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) { - c.RLock() - defer c.RUnlock() - dk, ok := c.cache[kf] - return dk, ok -} - -func (c *Client) setCache(kf KeyFactors, dk derivedKeys) { - c.Lock() - defer c.Unlock() - c.cache[kf] = dk - return -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/client_conv.go b/backend/services/controller/vendor/github.com/xdg-go/scram/client_conv.go deleted file mode 100644 index 8340568..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/client_conv.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/hmac" - "encoding/base64" - "errors" - "fmt" - "strings" -) - -type clientState int - -const ( - clientStarting clientState = iota - clientFirst - clientFinal - clientDone -) - -// ClientConversation implements the client-side of an authentication -// conversation with a server. A new conversation must be created for -// each authentication attempt. -type ClientConversation struct { - client *Client - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn - minIters int - state clientState - valid bool - gs2 string - nonce string - c1b string - serveSig []byte -} - -// Step takes a string provided from a server (or just an empty string for the -// very first conversation step) and attempts to move the authentication -// conversation forward. It returns a string to be sent to the server or an -// error if the server message is invalid. Calling Step after a conversation -// completes is also an error. -func (cc *ClientConversation) Step(challenge string) (response string, err error) { - switch cc.state { - case clientStarting: - cc.state = clientFirst - response, err = cc.firstMsg() - case clientFirst: - cc.state = clientFinal - response, err = cc.finalMsg(challenge) - case clientFinal: - cc.state = clientDone - response, err = cc.validateServer(challenge) - default: - response, err = "", errors.New("Conversation already completed") - } - return -} - -// Done returns true if the conversation is completed or has errored. -func (cc *ClientConversation) Done() bool { - return cc.state == clientDone -} - -// Valid returns true if the conversation successfully authenticated with the -// server, including counter-validation that the server actually has the -// user's stored credentials. 
-func (cc *ClientConversation) Valid() bool { - return cc.valid -} - -func (cc *ClientConversation) firstMsg() (string, error) { - // Values are cached for use in final message parameters - cc.gs2 = cc.gs2Header() - cc.nonce = cc.client.nonceGen() - cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce) - - return cc.gs2 + cc.c1b, nil -} - -func (cc *ClientConversation) finalMsg(s1 string) (string, error) { - msg, err := parseServerFirst(s1) - if err != nil { - return "", err - } - - // Check nonce prefix and update - if !strings.HasPrefix(msg.nonce, cc.nonce) { - return "", errors.New("server nonce did not extend client nonce") - } - cc.nonce = msg.nonce - - // Check iteration count vs minimum - if msg.iters < cc.minIters { - return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters) - } - - // Create client-final-message-without-proof - c2wop := fmt.Sprintf( - "c=%s,r=%s", - base64.StdEncoding.EncodeToString([]byte(cc.gs2)), - cc.nonce, - ) - - // Create auth message - authMsg := cc.c1b + "," + s1 + "," + c2wop - - // Get derived keys from client cache - dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters}) - - // Create proof as clientkey XOR clientsignature - clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg)) - clientProof := xorBytes(dk.ClientKey, clientSignature) - proof := base64.StdEncoding.EncodeToString(clientProof) - - // Cache ServerSignature for later validation - cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg)) - - return fmt.Sprintf("%s,p=%s", c2wop, proof), nil -} - -func (cc *ClientConversation) validateServer(s2 string) (string, error) { - msg, err := parseServerFinal(s2) - if err != nil { - return "", err - } - - if len(msg.err) > 0 { - return "", fmt.Errorf("server error: %s", msg.err) - } - - if !hmac.Equal(msg.verifier, cc.serveSig) { - return "", errors.New("server validation failed") - } - - cc.valid = true - return "", nil -} - 
-func (cc *ClientConversation) gs2Header() string { - if cc.client.authzID == "" { - return "n,," - } - return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID)) -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/common.go b/backend/services/controller/vendor/github.com/xdg-go/scram/common.go deleted file mode 100644 index cb705cb..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/common.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/hmac" - "crypto/rand" - "encoding/base64" - "strings" -) - -// NonceGeneratorFcn defines a function that returns a string of high-quality -// random printable ASCII characters EXCLUDING the comma (',') character. The -// default nonce generator provides Base64 encoding of 24 bytes from -// crypto/rand. -type NonceGeneratorFcn func() string - -// derivedKeys collects the three cryptographically derived values -// into one struct for caching. -type derivedKeys struct { - ClientKey []byte - StoredKey []byte - ServerKey []byte -} - -// KeyFactors represent the two server-provided factors needed to compute -// client credentials for authentication. Salt is decoded bytes (i.e. not -// base64), but in string form so that KeyFactors can be used as a map key for -// cached credentials. -type KeyFactors struct { - Salt string - Iters int -} - -// StoredCredentials are the values that a server must store for a given -// username to allow authentication. They include the salt and iteration -// count, plus the derived values to authenticate a client and for the server -// to authenticate itself back to the client. -// -// NOTE: these are specific to a given hash function. 
To allow a user to -// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of -// StoredCredentials must be created and stored, one for each hash function. -type StoredCredentials struct { - KeyFactors - StoredKey []byte - ServerKey []byte -} - -// CredentialLookup is a callback to provide StoredCredentials for a given -// username. This is used to configure Server objects. -// -// NOTE: these are specific to a given hash function. The callback provided -// to a Server with a given hash function must provide the corresponding -// StoredCredentials. -type CredentialLookup func(string) (StoredCredentials, error) - -func defaultNonceGenerator() string { - raw := make([]byte, 24) - nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) - rand.Read(raw) - base64.StdEncoding.Encode(nonce, raw) - return string(nonce) -} - -func encodeName(s string) string { - return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1) -} - -func decodeName(s string) (string, error) { - // TODO Check for = not followed by 2C or 3D - return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil -} - -func computeHash(hg HashGeneratorFcn, b []byte) []byte { - h := hg() - h.Write(b) - return h.Sum(nil) -} - -func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte { - mac := hmac.New(hg, key) - mac.Write(data) - return mac.Sum(nil) -} - -func xorBytes(a, b []byte) []byte { - // TODO check a & b are same length, or just xor to smallest - xor := make([]byte, len(a)) - for i := range a { - xor[i] = a[i] ^ b[i] - } - return xor -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/doc.go b/backend/services/controller/vendor/github.com/xdg-go/scram/doc.go deleted file mode 100644 index 82e8aee..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package scram provides client and server implementations of the Salted -// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802 -// and RFC-7677. -// -// Usage -// -// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that -// are used to construct Client or Server objects. -// -// clientSHA1, err := scram.SHA1.NewClient(username, password, authID) -// clientSHA256, err := scram.SHA256.NewClient(username, password, authID) -// clientSHA512, err := scram.SHA512.NewClient(username, password, authID) -// -// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) -// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) -// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn) -// -// These objects are used to construct ClientConversation or -// ServerConversation objects that are used to carry out authentication. -package scram diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/parse.go b/backend/services/controller/vendor/github.com/xdg-go/scram/parse.go deleted file mode 100644 index 722f604..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/parse.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "encoding/base64" - "errors" - "fmt" - "strconv" - "strings" -) - -type c1Msg struct { - gs2Header string - authzID string - username string - nonce string - c1b string -} - -type c2Msg struct { - cbind []byte - nonce string - proof []byte - c2wop string -} - -type s1Msg struct { - nonce string - salt []byte - iters int -} - -type s2Msg struct { - verifier []byte - err string -} - -func parseField(s, k string) (string, error) { - t := strings.TrimPrefix(s, k+"=") - if t == s { - return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k) - } - return t, nil -} - -func parseGS2Flag(s string) (string, error) { - if s[0] == 'p' { - return "", fmt.Errorf("channel binding requested but not supported") - } - - if s == "n" || s == "y" { - return s, nil - } - - return "", fmt.Errorf("error parsing '%s' for gs2 flag", s) -} - -func parseFieldBase64(s, k string) ([]byte, error) { - raw, err := parseField(s, k) - if err != nil { - return nil, err - } - - dec, err := base64.StdEncoding.DecodeString(raw) - if err != nil { - return nil, err - } - - return dec, nil -} - -func parseFieldInt(s, k string) (int, error) { - raw, err := parseField(s, k) - if err != nil { - return 0, err - } - - num, err := strconv.Atoi(raw) - if err != nil { - return 0, fmt.Errorf("error parsing field '%s': %v", k, err) - } - - return num, nil -} - -func parseClientFirst(c1 string) (msg c1Msg, err error) { - - fields := strings.Split(c1, ",") - if len(fields) < 4 { - err = errors.New("not enough fields in first server message") - return - } - - gs2flag, err := parseGS2Flag(fields[0]) - if err != nil { - return - } - - // 'a' field is optional - if len(fields[1]) > 0 { - msg.authzID, err = parseField(fields[1], "a") - if err != nil { - return - } - } - - // Recombine and save the gs2 header - msg.gs2Header = gs2flag + "," + msg.authzID + "," - - // Check for unsupported extensions 
field "m". - if strings.HasPrefix(fields[2], "m=") { - err = errors.New("SCRAM message extensions are not supported") - return - } - - msg.username, err = parseField(fields[2], "n") - if err != nil { - return - } - - msg.nonce, err = parseField(fields[3], "r") - if err != nil { - return - } - - msg.c1b = strings.Join(fields[2:], ",") - - return -} - -func parseClientFinal(c2 string) (msg c2Msg, err error) { - fields := strings.Split(c2, ",") - if len(fields) < 3 { - err = errors.New("not enough fields in first server message") - return - } - - msg.cbind, err = parseFieldBase64(fields[0], "c") - if err != nil { - return - } - - msg.nonce, err = parseField(fields[1], "r") - if err != nil { - return - } - - // Extension fields may come between nonce and proof, so we - // grab the *last* fields as proof. - msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p") - if err != nil { - return - } - - msg.c2wop = c2[:strings.LastIndex(c2, ",")] - - return -} - -func parseServerFirst(s1 string) (msg s1Msg, err error) { - - // Check for unsupported extensions field "m". 
- if strings.HasPrefix(s1, "m=") { - err = errors.New("SCRAM message extensions are not supported") - return - } - - fields := strings.Split(s1, ",") - if len(fields) < 3 { - err = errors.New("not enough fields in first server message") - return - } - - msg.nonce, err = parseField(fields[0], "r") - if err != nil { - return - } - - msg.salt, err = parseFieldBase64(fields[1], "s") - if err != nil { - return - } - - msg.iters, err = parseFieldInt(fields[2], "i") - - return -} - -func parseServerFinal(s2 string) (msg s2Msg, err error) { - fields := strings.Split(s2, ",") - - msg.verifier, err = parseFieldBase64(fields[0], "v") - if err == nil { - return - } - - msg.err, err = parseField(fields[0], "e") - - return -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/scram.go b/backend/services/controller/vendor/github.com/xdg-go/scram/scram.go deleted file mode 100644 index a7b3660..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/scram.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "fmt" - "hash" - - "github.com/xdg-go/stringprep" -) - -// HashGeneratorFcn abstracts a factory function that returns a hash.Hash -// value to be used for SCRAM operations. Generally, one would use the -// provided package variables, `scram.SHA1` and `scram.SHA256`, for the most -// common forms of SCRAM. -type HashGeneratorFcn func() hash.Hash - -// SHA1 is a function that returns a crypto/sha1 hasher and should be used to -// create Client objects configured for SHA-1 hashing. 
-var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } - -// SHA256 is a function that returns a crypto/sha256 hasher and should be used -// to create Client objects configured for SHA-256 hashing. -var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } - -// SHA512 is a function that returns a crypto/sha512 hasher and should be used -// to create Client objects configured for SHA-512 hashing. -var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() } - -// NewClient constructs a SCRAM client component based on a given hash.Hash -// factory receiver. This constructor will normalize the username, password -// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If -// SASLprep fails, the method returns an error. -func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) { - var userprep, passprep, authprep string - var err error - - if userprep, err = stringprep.SASLprep.Prepare(username); err != nil { - return nil, fmt.Errorf("Error SASLprepping username '%s': %v", username, err) - } - if passprep, err = stringprep.SASLprep.Prepare(password); err != nil { - return nil, fmt.Errorf("Error SASLprepping password '%s': %v", password, err) - } - if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil { - return nil, fmt.Errorf("Error SASLprepping authzID '%s': %v", authzID, err) - } - - return newClient(userprep, passprep, authprep, f), nil -} - -// NewClientUnprepped acts like NewClient, except none of the arguments will -// be normalized via SASLprep. This is not generally recommended, but is -// provided for users that may have custom normalization needs. -func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) { - return newClient(username, password, authzID, f), nil -} - -// NewServer constructs a SCRAM server component based on a given hash.Hash -// factory receiver. 
To be maximally generic, it uses dependency injection to -// handle credential lookup, which is the process of turning a username string -// into a struct with stored credentials for authentication. -func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) { - return newServer(cl, f) -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/server.go b/backend/services/controller/vendor/github.com/xdg-go/scram/server.go deleted file mode 100644 index b119b36..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/server.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import "sync" - -// Server implements the server side of SCRAM authentication. It holds -// configuration values needed to initialize new server-side conversations. -// Generally, this can be persistent within an application. -type Server struct { - sync.RWMutex - credentialCB CredentialLookup - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn -} - -func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) { - return &Server{ - credentialCB: cl, - nonceGen: defaultNonceGenerator, - hashGen: fcn, - }, nil -} - -// WithNonceGenerator replaces the default nonce generator (base64 encoding of -// 24 bytes from crypto/rand) with a custom generator. This is provided for -// testing or for users with custom nonce requirements. -func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server { - s.Lock() - defer s.Unlock() - s.nonceGen = ng - return s -} - -// NewConversation constructs a server-side authentication conversation. -// Conversations cannot be reused, so this must be called for each new -// authentication attempt. 
-func (s *Server) NewConversation() *ServerConversation { - s.RLock() - defer s.RUnlock() - return &ServerConversation{ - nonceGen: s.nonceGen, - hashGen: s.hashGen, - credentialCB: s.credentialCB, - } -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/scram/server_conv.go b/backend/services/controller/vendor/github.com/xdg-go/scram/server_conv.go deleted file mode 100644 index 9c8838c..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/scram/server_conv.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/hmac" - "encoding/base64" - "errors" - "fmt" -) - -type serverState int - -const ( - serverFirst serverState = iota - serverFinal - serverDone -) - -// ServerConversation implements the server-side of an authentication -// conversation with a client. A new conversation must be created for -// each authentication attempt. -type ServerConversation struct { - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn - credentialCB CredentialLookup - state serverState - credential StoredCredentials - valid bool - gs2Header string - username string - authzID string - nonce string - c1b string - s1 string -} - -// Step takes a string provided from a client and attempts to move the -// authentication conversation forward. It returns a string to be sent to the -// client or an error if the client message is invalid. Calling Step after a -// conversation completes is also an error. 
-func (sc *ServerConversation) Step(challenge string) (response string, err error) { - switch sc.state { - case serverFirst: - sc.state = serverFinal - response, err = sc.firstMsg(challenge) - case serverFinal: - sc.state = serverDone - response, err = sc.finalMsg(challenge) - default: - response, err = "", errors.New("Conversation already completed") - } - return -} - -// Done returns true if the conversation is completed or has errored. -func (sc *ServerConversation) Done() bool { - return sc.state == serverDone -} - -// Valid returns true if the conversation successfully authenticated the -// client. -func (sc *ServerConversation) Valid() bool { - return sc.valid -} - -// Username returns the client-provided username. This is valid to call -// if the first conversation Step() is successful. -func (sc *ServerConversation) Username() string { - return sc.username -} - -// AuthzID returns the (optional) client-provided authorization identity, if -// any. If one was not provided, it returns the empty string. This is valid -// to call if the first conversation Step() is successful. -func (sc *ServerConversation) AuthzID() string { - return sc.authzID -} - -func (sc *ServerConversation) firstMsg(c1 string) (string, error) { - msg, err := parseClientFirst(c1) - if err != nil { - sc.state = serverDone - return "", err - } - - sc.gs2Header = msg.gs2Header - sc.username = msg.username - sc.authzID = msg.authzID - - sc.credential, err = sc.credentialCB(msg.username) - if err != nil { - sc.state = serverDone - return "e=unknown-user", err - } - - sc.nonce = msg.nonce + sc.nonceGen() - sc.c1b = msg.c1b - sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d", - sc.nonce, - base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)), - sc.credential.Iters, - ) - - return sc.s1, nil -} - -// For errors, returns server error message as well as non-nil error. Callers -// can choose whether to send server error or not. 
-func (sc *ServerConversation) finalMsg(c2 string) (string, error) { - msg, err := parseClientFinal(c2) - if err != nil { - return "", err - } - - // Check channel binding matches what we expect; in this case, we expect - // just the gs2 header we received as we don't support channel binding - // with a data payload. If we add binding, we need to independently - // compute the header to match here. - if string(msg.cbind) != sc.gs2Header { - return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header) - } - - // Check nonce received matches what we sent - if msg.nonce != sc.nonce { - return "e=other-error", errors.New("nonce received did not match nonce sent") - } - - // Create auth message - authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop - - // Retrieve ClientKey from proof and verify it - clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg)) - clientKey := xorBytes([]byte(msg.proof), clientSignature) - storedKey := computeHash(sc.hashGen, clientKey) - - // Compare with constant-time function - if !hmac.Equal(storedKey, sc.credential.StoredKey) { - return "e=invalid-proof", errors.New("challenge proof invalid") - } - - sc.valid = true - - // Compute and return server verifier - serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg)) - return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/.gitignore b/backend/services/controller/vendor/github.com/xdg-go/stringprep/.gitignore deleted file mode 100644 index e69de29..0000000 diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/backend/services/controller/vendor/github.com/xdg-go/stringprep/CHANGELOG.md deleted file mode 100644 index e06787f..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/CHANGELOG.md +++ /dev/null @@ 
-1,29 +0,0 @@ -# CHANGELOG - - -## [v1.0.3] - 2022-03-01 - -### Maintenance - -- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 - - -## [v1.0.2] - 2021-03-27 - -### Maintenance - -- Change minimum Go version to 1.11 - - -## [v1.0.1] - 2021-03-24 - -### Bug Fixes - -- Add go.mod file - - -## [v1.0.0] - 2018-02-21 - -[v1.0.2]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.2 -[v1.0.1]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.1 -[v1.0.0]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.0 diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/LICENSE b/backend/services/controller/vendor/github.com/xdg-go/stringprep/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/README.md b/backend/services/controller/vendor/github.com/xdg-go/stringprep/README.md deleted file mode 100644 index 83ea534..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/README.md +++ /dev/null @@ -1,28 +0,0 @@ -[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/stringprep.svg)](https://pkg.go.dev/github.com/xdg-go/stringprep) -[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/stringprep)](https://goreportcard.com/report/github.com/xdg-go/stringprep) -[![Github Actions](https://github.com/xdg-go/stringprep/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/stringprep/actions/workflows/test.yml) - -# stringprep – Go implementation of RFC-3454 stringprep and RFC-4013 SASLprep - -## Synopsis - -``` - import "github.com/xdg-go/stringprep" - - prepped := stringprep.SASLprep.Prepare("TrustNô1") - -``` - -## Description - -This library provides an implementation of the stringprep algorithm -(RFC-3454) in Go, including all data tables. - -A pre-built SASLprep (RFC-4013) profile is provided as well. - -## Copyright and License - -Copyright 2018 by David A. Golden. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"). You may -obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/bidi.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/bidi.go deleted file mode 100644 index 6f6d321..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/bidi.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -var errHasLCat = "BiDi string can't have runes from category L" -var errFirstRune = "BiDi string first rune must have category R or AL" -var errLastRune = "BiDi string last rune must have category R or AL" - -// Check for prohibited characters from table C.8 -func checkBiDiProhibitedRune(s string) error { - for _, r := range s { - if TableC8.Contains(r) { - return Error{Msg: errProhibited, Rune: r} - } - } - return nil -} - -// Check for LCat characters from table D.2 -func checkBiDiLCat(s string) error { - for _, r := range s { - if TableD2.Contains(r) { - return Error{Msg: errHasLCat, Rune: r} - } - } - return nil -} - -// Check first and last characters are in table D.1; requires non-empty string -func checkBadFirstAndLastRandALCat(s string) error { - rs := []rune(s) - if !TableD1.Contains(rs[0]) { - return Error{Msg: errFirstRune, Rune: rs[0]} - } - n := len(rs) - 1 - if !TableD1.Contains(rs[n]) { - return Error{Msg: errLastRune, Rune: rs[n]} - } - return nil -} - -// Look for RandALCat characters from table D.1 -func hasBiDiRandALCat(s string) bool { - for _, r := range s { - if TableD1.Contains(r) { - return true - } - } - return false -} - -// Check that BiDi rules are satisfied ; let empty string pass this rule -func passesBiDiRules(s string) error { - if len(s) == 0 { - return nil - } - if err := checkBiDiProhibitedRune(s); err != nil { - return err - } - if hasBiDiRandALCat(s) { - if err := checkBiDiLCat(s); err != nil { - return err - } - if err := checkBadFirstAndLastRandALCat(s); err != nil { - return err - } - } - return nil -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/doc.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/doc.go deleted file mode 100644 index b319e08..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// 
Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package stringprep provides data tables and algorithms for RFC-3454, -// including errata (as of 2018-02). It also provides a profile for -// SASLprep as defined in RFC-4013. -package stringprep diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/error.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/error.go deleted file mode 100644 index 7403e49..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/error.go +++ /dev/null @@ -1,14 +0,0 @@ -package stringprep - -import "fmt" - -// Error describes problems encountered during stringprep, including what rune -// was problematic. -type Error struct { - Msg string - Rune rune -} - -func (e Error) Error() string { - return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune) -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/map.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/map.go deleted file mode 100644 index e56a0dd..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/map.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -// Mapping represents a stringprep mapping, from a single rune to zero or more -// runes. -type Mapping map[rune][]rune - -// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping. -// The ok return value is false if the rune was not found. 
-func (m Mapping) Map(r rune) (replacement []rune, ok bool) { - rs, ok := m[r] - if !ok { - return nil, false - } - return rs, true -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/profile.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/profile.go deleted file mode 100644 index 5a73be9..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/profile.go +++ /dev/null @@ -1,75 +0,0 @@ -package stringprep - -import ( - "golang.org/x/text/unicode/norm" -) - -// Profile represents a stringprep profile. -type Profile struct { - Mappings []Mapping - Normalize bool - Prohibits []Set - CheckBiDi bool -} - -var errProhibited = "prohibited character" - -// Prepare transforms an input string to an output string following -// the rules defined in the profile as defined by RFC-3454. -func (p Profile) Prepare(s string) (string, error) { - // Optimistically, assume output will be same length as input - temp := make([]rune, 0, len(s)) - - // Apply maps - for _, r := range s { - rs, ok := p.applyMaps(r) - if ok { - temp = append(temp, rs...) 
- } else { - temp = append(temp, r) - } - } - - // Normalize - var out string - if p.Normalize { - out = norm.NFKC.String(string(temp)) - } else { - out = string(temp) - } - - // Check prohibited - for _, r := range out { - if p.runeIsProhibited(r) { - return "", Error{Msg: errProhibited, Rune: r} - } - } - - // Check BiDi allowed - if p.CheckBiDi { - if err := passesBiDiRules(out); err != nil { - return "", err - } - } - - return out, nil -} - -func (p Profile) applyMaps(r rune) ([]rune, bool) { - for _, m := range p.Mappings { - rs, ok := m.Map(r) - if ok { - return rs, true - } - } - return nil, false -} - -func (p Profile) runeIsProhibited(r rune) bool { - for _, s := range p.Prohibits { - if s.Contains(r) { - return true - } - } - return false -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/saslprep.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/saslprep.go deleted file mode 100644 index 4001348..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/saslprep.go +++ /dev/null @@ -1,52 +0,0 @@ -package stringprep - -var mapNonASCIISpaceToASCIISpace = Mapping{ - 0x00A0: []rune{0x0020}, - 0x1680: []rune{0x0020}, - 0x2000: []rune{0x0020}, - 0x2001: []rune{0x0020}, - 0x2002: []rune{0x0020}, - 0x2003: []rune{0x0020}, - 0x2004: []rune{0x0020}, - 0x2005: []rune{0x0020}, - 0x2006: []rune{0x0020}, - 0x2007: []rune{0x0020}, - 0x2008: []rune{0x0020}, - 0x2009: []rune{0x0020}, - 0x200A: []rune{0x0020}, - 0x200B: []rune{0x0020}, - 0x202F: []rune{0x0020}, - 0x205F: []rune{0x0020}, - 0x3000: []rune{0x0020}, -} - -// SASLprep is a pre-defined stringprep profile for user names and passwords -// as described in RFC-4013. -// -// Because the stringprep distinction between query and stored strings was -// intended for compatibility across profile versions, but SASLprep was never -// updated and is now deprecated, this profile only operates in stored -// strings mode, prohibiting unassigned code points. 
-var SASLprep Profile = saslprep - -var saslprep = Profile{ - Mappings: []Mapping{ - TableB1, - mapNonASCIISpaceToASCIISpace, - }, - Normalize: true, - Prohibits: []Set{ - TableA1, - TableC1_2, - TableC2_1, - TableC2_2, - TableC3, - TableC4, - TableC5, - TableC6, - TableC7, - TableC8, - TableC9, - }, - CheckBiDi: true, -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/set.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/set.go deleted file mode 100644 index c837e28..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/set.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -import "sort" - -// RuneRange represents a close-ended range of runes: [N,M]. For a range -// consisting of a single rune, N and M will be equal. -type RuneRange [2]rune - -// Contains returns true if a rune is within the bounds of the RuneRange. -func (rr RuneRange) Contains(r rune) bool { - return rr[0] <= r && r <= rr[1] -} - -func (rr RuneRange) isAbove(r rune) bool { - return r <= rr[0] -} - -// Set represents a stringprep data table used to identify runes of a -// particular type. -type Set []RuneRange - -// Contains returns true if a rune is within any of the RuneRanges in the -// Set. 
-func (s Set) Contains(r rune) bool { - i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) }) - if i < len(s) && s[i].Contains(r) { - return true - } - return false -} diff --git a/backend/services/controller/vendor/github.com/xdg-go/stringprep/tables.go b/backend/services/controller/vendor/github.com/xdg-go/stringprep/tables.go deleted file mode 100644 index c3fc1fa..0000000 --- a/backend/services/controller/vendor/github.com/xdg-go/stringprep/tables.go +++ /dev/null @@ -1,3215 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -var tableA1 = Set{ - RuneRange{0x0221, 0x0221}, - RuneRange{0x0234, 0x024F}, - RuneRange{0x02AE, 0x02AF}, - RuneRange{0x02EF, 0x02FF}, - RuneRange{0x0350, 0x035F}, - RuneRange{0x0370, 0x0373}, - RuneRange{0x0376, 0x0379}, - RuneRange{0x037B, 0x037D}, - RuneRange{0x037F, 0x0383}, - RuneRange{0x038B, 0x038B}, - RuneRange{0x038D, 0x038D}, - RuneRange{0x03A2, 0x03A2}, - RuneRange{0x03CF, 0x03CF}, - RuneRange{0x03F7, 0x03FF}, - RuneRange{0x0487, 0x0487}, - RuneRange{0x04CF, 0x04CF}, - RuneRange{0x04F6, 0x04F7}, - RuneRange{0x04FA, 0x04FF}, - RuneRange{0x0510, 0x0530}, - RuneRange{0x0557, 0x0558}, - RuneRange{0x0560, 0x0560}, - RuneRange{0x0588, 0x0588}, - RuneRange{0x058B, 0x0590}, - RuneRange{0x05A2, 0x05A2}, - RuneRange{0x05BA, 0x05BA}, - RuneRange{0x05C5, 0x05CF}, - RuneRange{0x05EB, 0x05EF}, - RuneRange{0x05F5, 0x060B}, - RuneRange{0x060D, 0x061A}, - RuneRange{0x061C, 0x061E}, - RuneRange{0x0620, 0x0620}, - RuneRange{0x063B, 0x063F}, - RuneRange{0x0656, 0x065F}, - RuneRange{0x06EE, 0x06EF}, - RuneRange{0x06FF, 0x06FF}, - RuneRange{0x070E, 0x070E}, - RuneRange{0x072D, 0x072F}, - RuneRange{0x074B, 0x077F}, - RuneRange{0x07B2, 0x0900}, - 
RuneRange{0x0904, 0x0904}, - RuneRange{0x093A, 0x093B}, - RuneRange{0x094E, 0x094F}, - RuneRange{0x0955, 0x0957}, - RuneRange{0x0971, 0x0980}, - RuneRange{0x0984, 0x0984}, - RuneRange{0x098D, 0x098E}, - RuneRange{0x0991, 0x0992}, - RuneRange{0x09A9, 0x09A9}, - RuneRange{0x09B1, 0x09B1}, - RuneRange{0x09B3, 0x09B5}, - RuneRange{0x09BA, 0x09BB}, - RuneRange{0x09BD, 0x09BD}, - RuneRange{0x09C5, 0x09C6}, - RuneRange{0x09C9, 0x09CA}, - RuneRange{0x09CE, 0x09D6}, - RuneRange{0x09D8, 0x09DB}, - RuneRange{0x09DE, 0x09DE}, - RuneRange{0x09E4, 0x09E5}, - RuneRange{0x09FB, 0x0A01}, - RuneRange{0x0A03, 0x0A04}, - RuneRange{0x0A0B, 0x0A0E}, - RuneRange{0x0A11, 0x0A12}, - RuneRange{0x0A29, 0x0A29}, - RuneRange{0x0A31, 0x0A31}, - RuneRange{0x0A34, 0x0A34}, - RuneRange{0x0A37, 0x0A37}, - RuneRange{0x0A3A, 0x0A3B}, - RuneRange{0x0A3D, 0x0A3D}, - RuneRange{0x0A43, 0x0A46}, - RuneRange{0x0A49, 0x0A4A}, - RuneRange{0x0A4E, 0x0A58}, - RuneRange{0x0A5D, 0x0A5D}, - RuneRange{0x0A5F, 0x0A65}, - RuneRange{0x0A75, 0x0A80}, - RuneRange{0x0A84, 0x0A84}, - RuneRange{0x0A8C, 0x0A8C}, - RuneRange{0x0A8E, 0x0A8E}, - RuneRange{0x0A92, 0x0A92}, - RuneRange{0x0AA9, 0x0AA9}, - RuneRange{0x0AB1, 0x0AB1}, - RuneRange{0x0AB4, 0x0AB4}, - RuneRange{0x0ABA, 0x0ABB}, - RuneRange{0x0AC6, 0x0AC6}, - RuneRange{0x0ACA, 0x0ACA}, - RuneRange{0x0ACE, 0x0ACF}, - RuneRange{0x0AD1, 0x0ADF}, - RuneRange{0x0AE1, 0x0AE5}, - RuneRange{0x0AF0, 0x0B00}, - RuneRange{0x0B04, 0x0B04}, - RuneRange{0x0B0D, 0x0B0E}, - RuneRange{0x0B11, 0x0B12}, - RuneRange{0x0B29, 0x0B29}, - RuneRange{0x0B31, 0x0B31}, - RuneRange{0x0B34, 0x0B35}, - RuneRange{0x0B3A, 0x0B3B}, - RuneRange{0x0B44, 0x0B46}, - RuneRange{0x0B49, 0x0B4A}, - RuneRange{0x0B4E, 0x0B55}, - RuneRange{0x0B58, 0x0B5B}, - RuneRange{0x0B5E, 0x0B5E}, - RuneRange{0x0B62, 0x0B65}, - RuneRange{0x0B71, 0x0B81}, - RuneRange{0x0B84, 0x0B84}, - RuneRange{0x0B8B, 0x0B8D}, - RuneRange{0x0B91, 0x0B91}, - RuneRange{0x0B96, 0x0B98}, - RuneRange{0x0B9B, 0x0B9B}, - RuneRange{0x0B9D, 0x0B9D}, 
- RuneRange{0x0BA0, 0x0BA2}, - RuneRange{0x0BA5, 0x0BA7}, - RuneRange{0x0BAB, 0x0BAD}, - RuneRange{0x0BB6, 0x0BB6}, - RuneRange{0x0BBA, 0x0BBD}, - RuneRange{0x0BC3, 0x0BC5}, - RuneRange{0x0BC9, 0x0BC9}, - RuneRange{0x0BCE, 0x0BD6}, - RuneRange{0x0BD8, 0x0BE6}, - RuneRange{0x0BF3, 0x0C00}, - RuneRange{0x0C04, 0x0C04}, - RuneRange{0x0C0D, 0x0C0D}, - RuneRange{0x0C11, 0x0C11}, - RuneRange{0x0C29, 0x0C29}, - RuneRange{0x0C34, 0x0C34}, - RuneRange{0x0C3A, 0x0C3D}, - RuneRange{0x0C45, 0x0C45}, - RuneRange{0x0C49, 0x0C49}, - RuneRange{0x0C4E, 0x0C54}, - RuneRange{0x0C57, 0x0C5F}, - RuneRange{0x0C62, 0x0C65}, - RuneRange{0x0C70, 0x0C81}, - RuneRange{0x0C84, 0x0C84}, - RuneRange{0x0C8D, 0x0C8D}, - RuneRange{0x0C91, 0x0C91}, - RuneRange{0x0CA9, 0x0CA9}, - RuneRange{0x0CB4, 0x0CB4}, - RuneRange{0x0CBA, 0x0CBD}, - RuneRange{0x0CC5, 0x0CC5}, - RuneRange{0x0CC9, 0x0CC9}, - RuneRange{0x0CCE, 0x0CD4}, - RuneRange{0x0CD7, 0x0CDD}, - RuneRange{0x0CDF, 0x0CDF}, - RuneRange{0x0CE2, 0x0CE5}, - RuneRange{0x0CF0, 0x0D01}, - RuneRange{0x0D04, 0x0D04}, - RuneRange{0x0D0D, 0x0D0D}, - RuneRange{0x0D11, 0x0D11}, - RuneRange{0x0D29, 0x0D29}, - RuneRange{0x0D3A, 0x0D3D}, - RuneRange{0x0D44, 0x0D45}, - RuneRange{0x0D49, 0x0D49}, - RuneRange{0x0D4E, 0x0D56}, - RuneRange{0x0D58, 0x0D5F}, - RuneRange{0x0D62, 0x0D65}, - RuneRange{0x0D70, 0x0D81}, - RuneRange{0x0D84, 0x0D84}, - RuneRange{0x0D97, 0x0D99}, - RuneRange{0x0DB2, 0x0DB2}, - RuneRange{0x0DBC, 0x0DBC}, - RuneRange{0x0DBE, 0x0DBF}, - RuneRange{0x0DC7, 0x0DC9}, - RuneRange{0x0DCB, 0x0DCE}, - RuneRange{0x0DD5, 0x0DD5}, - RuneRange{0x0DD7, 0x0DD7}, - RuneRange{0x0DE0, 0x0DF1}, - RuneRange{0x0DF5, 0x0E00}, - RuneRange{0x0E3B, 0x0E3E}, - RuneRange{0x0E5C, 0x0E80}, - RuneRange{0x0E83, 0x0E83}, - RuneRange{0x0E85, 0x0E86}, - RuneRange{0x0E89, 0x0E89}, - RuneRange{0x0E8B, 0x0E8C}, - RuneRange{0x0E8E, 0x0E93}, - RuneRange{0x0E98, 0x0E98}, - RuneRange{0x0EA0, 0x0EA0}, - RuneRange{0x0EA4, 0x0EA4}, - RuneRange{0x0EA6, 0x0EA6}, - RuneRange{0x0EA8, 
0x0EA9}, - RuneRange{0x0EAC, 0x0EAC}, - RuneRange{0x0EBA, 0x0EBA}, - RuneRange{0x0EBE, 0x0EBF}, - RuneRange{0x0EC5, 0x0EC5}, - RuneRange{0x0EC7, 0x0EC7}, - RuneRange{0x0ECE, 0x0ECF}, - RuneRange{0x0EDA, 0x0EDB}, - RuneRange{0x0EDE, 0x0EFF}, - RuneRange{0x0F48, 0x0F48}, - RuneRange{0x0F6B, 0x0F70}, - RuneRange{0x0F8C, 0x0F8F}, - RuneRange{0x0F98, 0x0F98}, - RuneRange{0x0FBD, 0x0FBD}, - RuneRange{0x0FCD, 0x0FCE}, - RuneRange{0x0FD0, 0x0FFF}, - RuneRange{0x1022, 0x1022}, - RuneRange{0x1028, 0x1028}, - RuneRange{0x102B, 0x102B}, - RuneRange{0x1033, 0x1035}, - RuneRange{0x103A, 0x103F}, - RuneRange{0x105A, 0x109F}, - RuneRange{0x10C6, 0x10CF}, - RuneRange{0x10F9, 0x10FA}, - RuneRange{0x10FC, 0x10FF}, - RuneRange{0x115A, 0x115E}, - RuneRange{0x11A3, 0x11A7}, - RuneRange{0x11FA, 0x11FF}, - RuneRange{0x1207, 0x1207}, - RuneRange{0x1247, 0x1247}, - RuneRange{0x1249, 0x1249}, - RuneRange{0x124E, 0x124F}, - RuneRange{0x1257, 0x1257}, - RuneRange{0x1259, 0x1259}, - RuneRange{0x125E, 0x125F}, - RuneRange{0x1287, 0x1287}, - RuneRange{0x1289, 0x1289}, - RuneRange{0x128E, 0x128F}, - RuneRange{0x12AF, 0x12AF}, - RuneRange{0x12B1, 0x12B1}, - RuneRange{0x12B6, 0x12B7}, - RuneRange{0x12BF, 0x12BF}, - RuneRange{0x12C1, 0x12C1}, - RuneRange{0x12C6, 0x12C7}, - RuneRange{0x12CF, 0x12CF}, - RuneRange{0x12D7, 0x12D7}, - RuneRange{0x12EF, 0x12EF}, - RuneRange{0x130F, 0x130F}, - RuneRange{0x1311, 0x1311}, - RuneRange{0x1316, 0x1317}, - RuneRange{0x131F, 0x131F}, - RuneRange{0x1347, 0x1347}, - RuneRange{0x135B, 0x1360}, - RuneRange{0x137D, 0x139F}, - RuneRange{0x13F5, 0x1400}, - RuneRange{0x1677, 0x167F}, - RuneRange{0x169D, 0x169F}, - RuneRange{0x16F1, 0x16FF}, - RuneRange{0x170D, 0x170D}, - RuneRange{0x1715, 0x171F}, - RuneRange{0x1737, 0x173F}, - RuneRange{0x1754, 0x175F}, - RuneRange{0x176D, 0x176D}, - RuneRange{0x1771, 0x1771}, - RuneRange{0x1774, 0x177F}, - RuneRange{0x17DD, 0x17DF}, - RuneRange{0x17EA, 0x17FF}, - RuneRange{0x180F, 0x180F}, - RuneRange{0x181A, 0x181F}, - 
RuneRange{0x1878, 0x187F}, - RuneRange{0x18AA, 0x1DFF}, - RuneRange{0x1E9C, 0x1E9F}, - RuneRange{0x1EFA, 0x1EFF}, - RuneRange{0x1F16, 0x1F17}, - RuneRange{0x1F1E, 0x1F1F}, - RuneRange{0x1F46, 0x1F47}, - RuneRange{0x1F4E, 0x1F4F}, - RuneRange{0x1F58, 0x1F58}, - RuneRange{0x1F5A, 0x1F5A}, - RuneRange{0x1F5C, 0x1F5C}, - RuneRange{0x1F5E, 0x1F5E}, - RuneRange{0x1F7E, 0x1F7F}, - RuneRange{0x1FB5, 0x1FB5}, - RuneRange{0x1FC5, 0x1FC5}, - RuneRange{0x1FD4, 0x1FD5}, - RuneRange{0x1FDC, 0x1FDC}, - RuneRange{0x1FF0, 0x1FF1}, - RuneRange{0x1FF5, 0x1FF5}, - RuneRange{0x1FFF, 0x1FFF}, - RuneRange{0x2053, 0x2056}, - RuneRange{0x2058, 0x205E}, - RuneRange{0x2064, 0x2069}, - RuneRange{0x2072, 0x2073}, - RuneRange{0x208F, 0x209F}, - RuneRange{0x20B2, 0x20CF}, - RuneRange{0x20EB, 0x20FF}, - RuneRange{0x213B, 0x213C}, - RuneRange{0x214C, 0x2152}, - RuneRange{0x2184, 0x218F}, - RuneRange{0x23CF, 0x23FF}, - RuneRange{0x2427, 0x243F}, - RuneRange{0x244B, 0x245F}, - RuneRange{0x24FF, 0x24FF}, - RuneRange{0x2614, 0x2615}, - RuneRange{0x2618, 0x2618}, - RuneRange{0x267E, 0x267F}, - RuneRange{0x268A, 0x2700}, - RuneRange{0x2705, 0x2705}, - RuneRange{0x270A, 0x270B}, - RuneRange{0x2728, 0x2728}, - RuneRange{0x274C, 0x274C}, - RuneRange{0x274E, 0x274E}, - RuneRange{0x2753, 0x2755}, - RuneRange{0x2757, 0x2757}, - RuneRange{0x275F, 0x2760}, - RuneRange{0x2795, 0x2797}, - RuneRange{0x27B0, 0x27B0}, - RuneRange{0x27BF, 0x27CF}, - RuneRange{0x27EC, 0x27EF}, - RuneRange{0x2B00, 0x2E7F}, - RuneRange{0x2E9A, 0x2E9A}, - RuneRange{0x2EF4, 0x2EFF}, - RuneRange{0x2FD6, 0x2FEF}, - RuneRange{0x2FFC, 0x2FFF}, - RuneRange{0x3040, 0x3040}, - RuneRange{0x3097, 0x3098}, - RuneRange{0x3100, 0x3104}, - RuneRange{0x312D, 0x3130}, - RuneRange{0x318F, 0x318F}, - RuneRange{0x31B8, 0x31EF}, - RuneRange{0x321D, 0x321F}, - RuneRange{0x3244, 0x3250}, - RuneRange{0x327C, 0x327E}, - RuneRange{0x32CC, 0x32CF}, - RuneRange{0x32FF, 0x32FF}, - RuneRange{0x3377, 0x337A}, - RuneRange{0x33DE, 0x33DF}, - RuneRange{0x33FF, 0x33FF}, 
- RuneRange{0x4DB6, 0x4DFF}, - RuneRange{0x9FA6, 0x9FFF}, - RuneRange{0xA48D, 0xA48F}, - RuneRange{0xA4C7, 0xABFF}, - RuneRange{0xD7A4, 0xD7FF}, - RuneRange{0xFA2E, 0xFA2F}, - RuneRange{0xFA6B, 0xFAFF}, - RuneRange{0xFB07, 0xFB12}, - RuneRange{0xFB18, 0xFB1C}, - RuneRange{0xFB37, 0xFB37}, - RuneRange{0xFB3D, 0xFB3D}, - RuneRange{0xFB3F, 0xFB3F}, - RuneRange{0xFB42, 0xFB42}, - RuneRange{0xFB45, 0xFB45}, - RuneRange{0xFBB2, 0xFBD2}, - RuneRange{0xFD40, 0xFD4F}, - RuneRange{0xFD90, 0xFD91}, - RuneRange{0xFDC8, 0xFDCF}, - RuneRange{0xFDFD, 0xFDFF}, - RuneRange{0xFE10, 0xFE1F}, - RuneRange{0xFE24, 0xFE2F}, - RuneRange{0xFE47, 0xFE48}, - RuneRange{0xFE53, 0xFE53}, - RuneRange{0xFE67, 0xFE67}, - RuneRange{0xFE6C, 0xFE6F}, - RuneRange{0xFE75, 0xFE75}, - RuneRange{0xFEFD, 0xFEFE}, - RuneRange{0xFF00, 0xFF00}, - RuneRange{0xFFBF, 0xFFC1}, - RuneRange{0xFFC8, 0xFFC9}, - RuneRange{0xFFD0, 0xFFD1}, - RuneRange{0xFFD8, 0xFFD9}, - RuneRange{0xFFDD, 0xFFDF}, - RuneRange{0xFFE7, 0xFFE7}, - RuneRange{0xFFEF, 0xFFF8}, - RuneRange{0x10000, 0x102FF}, - RuneRange{0x1031F, 0x1031F}, - RuneRange{0x10324, 0x1032F}, - RuneRange{0x1034B, 0x103FF}, - RuneRange{0x10426, 0x10427}, - RuneRange{0x1044E, 0x1CFFF}, - RuneRange{0x1D0F6, 0x1D0FF}, - RuneRange{0x1D127, 0x1D129}, - RuneRange{0x1D1DE, 0x1D3FF}, - RuneRange{0x1D455, 0x1D455}, - RuneRange{0x1D49D, 0x1D49D}, - RuneRange{0x1D4A0, 0x1D4A1}, - RuneRange{0x1D4A3, 0x1D4A4}, - RuneRange{0x1D4A7, 0x1D4A8}, - RuneRange{0x1D4AD, 0x1D4AD}, - RuneRange{0x1D4BA, 0x1D4BA}, - RuneRange{0x1D4BC, 0x1D4BC}, - RuneRange{0x1D4C1, 0x1D4C1}, - RuneRange{0x1D4C4, 0x1D4C4}, - RuneRange{0x1D506, 0x1D506}, - RuneRange{0x1D50B, 0x1D50C}, - RuneRange{0x1D515, 0x1D515}, - RuneRange{0x1D51D, 0x1D51D}, - RuneRange{0x1D53A, 0x1D53A}, - RuneRange{0x1D53F, 0x1D53F}, - RuneRange{0x1D545, 0x1D545}, - RuneRange{0x1D547, 0x1D549}, - RuneRange{0x1D551, 0x1D551}, - RuneRange{0x1D6A4, 0x1D6A7}, - RuneRange{0x1D7CA, 0x1D7CD}, - RuneRange{0x1D800, 0x1FFFD}, - RuneRange{0x2A6D7, 
0x2F7FF}, - RuneRange{0x2FA1E, 0x2FFFD}, - RuneRange{0x30000, 0x3FFFD}, - RuneRange{0x40000, 0x4FFFD}, - RuneRange{0x50000, 0x5FFFD}, - RuneRange{0x60000, 0x6FFFD}, - RuneRange{0x70000, 0x7FFFD}, - RuneRange{0x80000, 0x8FFFD}, - RuneRange{0x90000, 0x9FFFD}, - RuneRange{0xA0000, 0xAFFFD}, - RuneRange{0xB0000, 0xBFFFD}, - RuneRange{0xC0000, 0xCFFFD}, - RuneRange{0xD0000, 0xDFFFD}, - RuneRange{0xE0000, 0xE0000}, - RuneRange{0xE0002, 0xE001F}, - RuneRange{0xE0080, 0xEFFFD}, -} - -// TableA1 represents RFC-3454 Table A.1. -var TableA1 Set = tableA1 - -var tableB1 = Mapping{ - 0x00AD: []rune{}, // Map to nothing - 0x034F: []rune{}, // Map to nothing - 0x180B: []rune{}, // Map to nothing - 0x180C: []rune{}, // Map to nothing - 0x180D: []rune{}, // Map to nothing - 0x200B: []rune{}, // Map to nothing - 0x200C: []rune{}, // Map to nothing - 0x200D: []rune{}, // Map to nothing - 0x2060: []rune{}, // Map to nothing - 0xFE00: []rune{}, // Map to nothing - 0xFE01: []rune{}, // Map to nothing - 0xFE02: []rune{}, // Map to nothing - 0xFE03: []rune{}, // Map to nothing - 0xFE04: []rune{}, // Map to nothing - 0xFE05: []rune{}, // Map to nothing - 0xFE06: []rune{}, // Map to nothing - 0xFE07: []rune{}, // Map to nothing - 0xFE08: []rune{}, // Map to nothing - 0xFE09: []rune{}, // Map to nothing - 0xFE0A: []rune{}, // Map to nothing - 0xFE0B: []rune{}, // Map to nothing - 0xFE0C: []rune{}, // Map to nothing - 0xFE0D: []rune{}, // Map to nothing - 0xFE0E: []rune{}, // Map to nothing - 0xFE0F: []rune{}, // Map to nothing - 0xFEFF: []rune{}, // Map to nothing -} - -// TableB1 represents RFC-3454 Table B.1. 
-var TableB1 Mapping = tableB1 - -var tableB2 = Mapping{ - 0x0041: []rune{0x0061}, // Case map - 0x0042: []rune{0x0062}, // Case map - 0x0043: []rune{0x0063}, // Case map - 0x0044: []rune{0x0064}, // Case map - 0x0045: []rune{0x0065}, // Case map - 0x0046: []rune{0x0066}, // Case map - 0x0047: []rune{0x0067}, // Case map - 0x0048: []rune{0x0068}, // Case map - 0x0049: []rune{0x0069}, // Case map - 0x004A: []rune{0x006A}, // Case map - 0x004B: []rune{0x006B}, // Case map - 0x004C: []rune{0x006C}, // Case map - 0x004D: []rune{0x006D}, // Case map - 0x004E: []rune{0x006E}, // Case map - 0x004F: []rune{0x006F}, // Case map - 0x0050: []rune{0x0070}, // Case map - 0x0051: []rune{0x0071}, // Case map - 0x0052: []rune{0x0072}, // Case map - 0x0053: []rune{0x0073}, // Case map - 0x0054: []rune{0x0074}, // Case map - 0x0055: []rune{0x0075}, // Case map - 0x0056: []rune{0x0076}, // Case map - 0x0057: []rune{0x0077}, // Case map - 0x0058: []rune{0x0078}, // Case map - 0x0059: []rune{0x0079}, // Case map - 0x005A: []rune{0x007A}, // Case map - 0x00B5: []rune{0x03BC}, // Case map - 0x00C0: []rune{0x00E0}, // Case map - 0x00C1: []rune{0x00E1}, // Case map - 0x00C2: []rune{0x00E2}, // Case map - 0x00C3: []rune{0x00E3}, // Case map - 0x00C4: []rune{0x00E4}, // Case map - 0x00C5: []rune{0x00E5}, // Case map - 0x00C6: []rune{0x00E6}, // Case map - 0x00C7: []rune{0x00E7}, // Case map - 0x00C8: []rune{0x00E8}, // Case map - 0x00C9: []rune{0x00E9}, // Case map - 0x00CA: []rune{0x00EA}, // Case map - 0x00CB: []rune{0x00EB}, // Case map - 0x00CC: []rune{0x00EC}, // Case map - 0x00CD: []rune{0x00ED}, // Case map - 0x00CE: []rune{0x00EE}, // Case map - 0x00CF: []rune{0x00EF}, // Case map - 0x00D0: []rune{0x00F0}, // Case map - 0x00D1: []rune{0x00F1}, // Case map - 0x00D2: []rune{0x00F2}, // Case map - 0x00D3: []rune{0x00F3}, // Case map - 0x00D4: []rune{0x00F4}, // Case map - 0x00D5: []rune{0x00F5}, // Case map - 0x00D6: []rune{0x00F6}, // Case map - 0x00D8: []rune{0x00F8}, // Case map - 
0x00D9: []rune{0x00F9}, // Case map - 0x00DA: []rune{0x00FA}, // Case map - 0x00DB: []rune{0x00FB}, // Case map - 0x00DC: []rune{0x00FC}, // Case map - 0x00DD: []rune{0x00FD}, // Case map - 0x00DE: []rune{0x00FE}, // Case map - 0x00DF: []rune{0x0073, 0x0073}, // Case map - 0x0100: []rune{0x0101}, // Case map - 0x0102: []rune{0x0103}, // Case map - 0x0104: []rune{0x0105}, // Case map - 0x0106: []rune{0x0107}, // Case map - 0x0108: []rune{0x0109}, // Case map - 0x010A: []rune{0x010B}, // Case map - 0x010C: []rune{0x010D}, // Case map - 0x010E: []rune{0x010F}, // Case map - 0x0110: []rune{0x0111}, // Case map - 0x0112: []rune{0x0113}, // Case map - 0x0114: []rune{0x0115}, // Case map - 0x0116: []rune{0x0117}, // Case map - 0x0118: []rune{0x0119}, // Case map - 0x011A: []rune{0x011B}, // Case map - 0x011C: []rune{0x011D}, // Case map - 0x011E: []rune{0x011F}, // Case map - 0x0120: []rune{0x0121}, // Case map - 0x0122: []rune{0x0123}, // Case map - 0x0124: []rune{0x0125}, // Case map - 0x0126: []rune{0x0127}, // Case map - 0x0128: []rune{0x0129}, // Case map - 0x012A: []rune{0x012B}, // Case map - 0x012C: []rune{0x012D}, // Case map - 0x012E: []rune{0x012F}, // Case map - 0x0130: []rune{0x0069, 0x0307}, // Case map - 0x0132: []rune{0x0133}, // Case map - 0x0134: []rune{0x0135}, // Case map - 0x0136: []rune{0x0137}, // Case map - 0x0139: []rune{0x013A}, // Case map - 0x013B: []rune{0x013C}, // Case map - 0x013D: []rune{0x013E}, // Case map - 0x013F: []rune{0x0140}, // Case map - 0x0141: []rune{0x0142}, // Case map - 0x0143: []rune{0x0144}, // Case map - 0x0145: []rune{0x0146}, // Case map - 0x0147: []rune{0x0148}, // Case map - 0x0149: []rune{0x02BC, 0x006E}, // Case map - 0x014A: []rune{0x014B}, // Case map - 0x014C: []rune{0x014D}, // Case map - 0x014E: []rune{0x014F}, // Case map - 0x0150: []rune{0x0151}, // Case map - 0x0152: []rune{0x0153}, // Case map - 0x0154: []rune{0x0155}, // Case map - 0x0156: []rune{0x0157}, // Case map - 0x0158: []rune{0x0159}, // Case map - 
0x015A: []rune{0x015B}, // Case map - 0x015C: []rune{0x015D}, // Case map - 0x015E: []rune{0x015F}, // Case map - 0x0160: []rune{0x0161}, // Case map - 0x0162: []rune{0x0163}, // Case map - 0x0164: []rune{0x0165}, // Case map - 0x0166: []rune{0x0167}, // Case map - 0x0168: []rune{0x0169}, // Case map - 0x016A: []rune{0x016B}, // Case map - 0x016C: []rune{0x016D}, // Case map - 0x016E: []rune{0x016F}, // Case map - 0x0170: []rune{0x0171}, // Case map - 0x0172: []rune{0x0173}, // Case map - 0x0174: []rune{0x0175}, // Case map - 0x0176: []rune{0x0177}, // Case map - 0x0178: []rune{0x00FF}, // Case map - 0x0179: []rune{0x017A}, // Case map - 0x017B: []rune{0x017C}, // Case map - 0x017D: []rune{0x017E}, // Case map - 0x017F: []rune{0x0073}, // Case map - 0x0181: []rune{0x0253}, // Case map - 0x0182: []rune{0x0183}, // Case map - 0x0184: []rune{0x0185}, // Case map - 0x0186: []rune{0x0254}, // Case map - 0x0187: []rune{0x0188}, // Case map - 0x0189: []rune{0x0256}, // Case map - 0x018A: []rune{0x0257}, // Case map - 0x018B: []rune{0x018C}, // Case map - 0x018E: []rune{0x01DD}, // Case map - 0x018F: []rune{0x0259}, // Case map - 0x0190: []rune{0x025B}, // Case map - 0x0191: []rune{0x0192}, // Case map - 0x0193: []rune{0x0260}, // Case map - 0x0194: []rune{0x0263}, // Case map - 0x0196: []rune{0x0269}, // Case map - 0x0197: []rune{0x0268}, // Case map - 0x0198: []rune{0x0199}, // Case map - 0x019C: []rune{0x026F}, // Case map - 0x019D: []rune{0x0272}, // Case map - 0x019F: []rune{0x0275}, // Case map - 0x01A0: []rune{0x01A1}, // Case map - 0x01A2: []rune{0x01A3}, // Case map - 0x01A4: []rune{0x01A5}, // Case map - 0x01A6: []rune{0x0280}, // Case map - 0x01A7: []rune{0x01A8}, // Case map - 0x01A9: []rune{0x0283}, // Case map - 0x01AC: []rune{0x01AD}, // Case map - 0x01AE: []rune{0x0288}, // Case map - 0x01AF: []rune{0x01B0}, // Case map - 0x01B1: []rune{0x028A}, // Case map - 0x01B2: []rune{0x028B}, // Case map - 0x01B3: []rune{0x01B4}, // Case map - 0x01B5: []rune{0x01B6}, 
// Case map - 0x01B7: []rune{0x0292}, // Case map - 0x01B8: []rune{0x01B9}, // Case map - 0x01BC: []rune{0x01BD}, // Case map - 0x01C4: []rune{0x01C6}, // Case map - 0x01C5: []rune{0x01C6}, // Case map - 0x01C7: []rune{0x01C9}, // Case map - 0x01C8: []rune{0x01C9}, // Case map - 0x01CA: []rune{0x01CC}, // Case map - 0x01CB: []rune{0x01CC}, // Case map - 0x01CD: []rune{0x01CE}, // Case map - 0x01CF: []rune{0x01D0}, // Case map - 0x01D1: []rune{0x01D2}, // Case map - 0x01D3: []rune{0x01D4}, // Case map - 0x01D5: []rune{0x01D6}, // Case map - 0x01D7: []rune{0x01D8}, // Case map - 0x01D9: []rune{0x01DA}, // Case map - 0x01DB: []rune{0x01DC}, // Case map - 0x01DE: []rune{0x01DF}, // Case map - 0x01E0: []rune{0x01E1}, // Case map - 0x01E2: []rune{0x01E3}, // Case map - 0x01E4: []rune{0x01E5}, // Case map - 0x01E6: []rune{0x01E7}, // Case map - 0x01E8: []rune{0x01E9}, // Case map - 0x01EA: []rune{0x01EB}, // Case map - 0x01EC: []rune{0x01ED}, // Case map - 0x01EE: []rune{0x01EF}, // Case map - 0x01F0: []rune{0x006A, 0x030C}, // Case map - 0x01F1: []rune{0x01F3}, // Case map - 0x01F2: []rune{0x01F3}, // Case map - 0x01F4: []rune{0x01F5}, // Case map - 0x01F6: []rune{0x0195}, // Case map - 0x01F7: []rune{0x01BF}, // Case map - 0x01F8: []rune{0x01F9}, // Case map - 0x01FA: []rune{0x01FB}, // Case map - 0x01FC: []rune{0x01FD}, // Case map - 0x01FE: []rune{0x01FF}, // Case map - 0x0200: []rune{0x0201}, // Case map - 0x0202: []rune{0x0203}, // Case map - 0x0204: []rune{0x0205}, // Case map - 0x0206: []rune{0x0207}, // Case map - 0x0208: []rune{0x0209}, // Case map - 0x020A: []rune{0x020B}, // Case map - 0x020C: []rune{0x020D}, // Case map - 0x020E: []rune{0x020F}, // Case map - 0x0210: []rune{0x0211}, // Case map - 0x0212: []rune{0x0213}, // Case map - 0x0214: []rune{0x0215}, // Case map - 0x0216: []rune{0x0217}, // Case map - 0x0218: []rune{0x0219}, // Case map - 0x021A: []rune{0x021B}, // Case map - 0x021C: []rune{0x021D}, // Case map - 0x021E: []rune{0x021F}, // Case map - 
0x0220: []rune{0x019E}, // Case map - 0x0222: []rune{0x0223}, // Case map - 0x0224: []rune{0x0225}, // Case map - 0x0226: []rune{0x0227}, // Case map - 0x0228: []rune{0x0229}, // Case map - 0x022A: []rune{0x022B}, // Case map - 0x022C: []rune{0x022D}, // Case map - 0x022E: []rune{0x022F}, // Case map - 0x0230: []rune{0x0231}, // Case map - 0x0232: []rune{0x0233}, // Case map - 0x0345: []rune{0x03B9}, // Case map - 0x037A: []rune{0x0020, 0x03B9}, // Additional folding - 0x0386: []rune{0x03AC}, // Case map - 0x0388: []rune{0x03AD}, // Case map - 0x0389: []rune{0x03AE}, // Case map - 0x038A: []rune{0x03AF}, // Case map - 0x038C: []rune{0x03CC}, // Case map - 0x038E: []rune{0x03CD}, // Case map - 0x038F: []rune{0x03CE}, // Case map - 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map - 0x0391: []rune{0x03B1}, // Case map - 0x0392: []rune{0x03B2}, // Case map - 0x0393: []rune{0x03B3}, // Case map - 0x0394: []rune{0x03B4}, // Case map - 0x0395: []rune{0x03B5}, // Case map - 0x0396: []rune{0x03B6}, // Case map - 0x0397: []rune{0x03B7}, // Case map - 0x0398: []rune{0x03B8}, // Case map - 0x0399: []rune{0x03B9}, // Case map - 0x039A: []rune{0x03BA}, // Case map - 0x039B: []rune{0x03BB}, // Case map - 0x039C: []rune{0x03BC}, // Case map - 0x039D: []rune{0x03BD}, // Case map - 0x039E: []rune{0x03BE}, // Case map - 0x039F: []rune{0x03BF}, // Case map - 0x03A0: []rune{0x03C0}, // Case map - 0x03A1: []rune{0x03C1}, // Case map - 0x03A3: []rune{0x03C3}, // Case map - 0x03A4: []rune{0x03C4}, // Case map - 0x03A5: []rune{0x03C5}, // Case map - 0x03A6: []rune{0x03C6}, // Case map - 0x03A7: []rune{0x03C7}, // Case map - 0x03A8: []rune{0x03C8}, // Case map - 0x03A9: []rune{0x03C9}, // Case map - 0x03AA: []rune{0x03CA}, // Case map - 0x03AB: []rune{0x03CB}, // Case map - 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map - 0x03C2: []rune{0x03C3}, // Case map - 0x03D0: []rune{0x03B2}, // Case map - 0x03D1: []rune{0x03B8}, // Case map - 0x03D2: []rune{0x03C5}, // Additional folding - 
0x03D3: []rune{0x03CD}, // Additional folding - 0x03D4: []rune{0x03CB}, // Additional folding - 0x03D5: []rune{0x03C6}, // Case map - 0x03D6: []rune{0x03C0}, // Case map - 0x03D8: []rune{0x03D9}, // Case map - 0x03DA: []rune{0x03DB}, // Case map - 0x03DC: []rune{0x03DD}, // Case map - 0x03DE: []rune{0x03DF}, // Case map - 0x03E0: []rune{0x03E1}, // Case map - 0x03E2: []rune{0x03E3}, // Case map - 0x03E4: []rune{0x03E5}, // Case map - 0x03E6: []rune{0x03E7}, // Case map - 0x03E8: []rune{0x03E9}, // Case map - 0x03EA: []rune{0x03EB}, // Case map - 0x03EC: []rune{0x03ED}, // Case map - 0x03EE: []rune{0x03EF}, // Case map - 0x03F0: []rune{0x03BA}, // Case map - 0x03F1: []rune{0x03C1}, // Case map - 0x03F2: []rune{0x03C3}, // Case map - 0x03F4: []rune{0x03B8}, // Case map - 0x03F5: []rune{0x03B5}, // Case map - 0x0400: []rune{0x0450}, // Case map - 0x0401: []rune{0x0451}, // Case map - 0x0402: []rune{0x0452}, // Case map - 0x0403: []rune{0x0453}, // Case map - 0x0404: []rune{0x0454}, // Case map - 0x0405: []rune{0x0455}, // Case map - 0x0406: []rune{0x0456}, // Case map - 0x0407: []rune{0x0457}, // Case map - 0x0408: []rune{0x0458}, // Case map - 0x0409: []rune{0x0459}, // Case map - 0x040A: []rune{0x045A}, // Case map - 0x040B: []rune{0x045B}, // Case map - 0x040C: []rune{0x045C}, // Case map - 0x040D: []rune{0x045D}, // Case map - 0x040E: []rune{0x045E}, // Case map - 0x040F: []rune{0x045F}, // Case map - 0x0410: []rune{0x0430}, // Case map - 0x0411: []rune{0x0431}, // Case map - 0x0412: []rune{0x0432}, // Case map - 0x0413: []rune{0x0433}, // Case map - 0x0414: []rune{0x0434}, // Case map - 0x0415: []rune{0x0435}, // Case map - 0x0416: []rune{0x0436}, // Case map - 0x0417: []rune{0x0437}, // Case map - 0x0418: []rune{0x0438}, // Case map - 0x0419: []rune{0x0439}, // Case map - 0x041A: []rune{0x043A}, // Case map - 0x041B: []rune{0x043B}, // Case map - 0x041C: []rune{0x043C}, // Case map - 0x041D: []rune{0x043D}, // Case map - 0x041E: []rune{0x043E}, // Case map - 
0x041F: []rune{0x043F}, // Case map - 0x0420: []rune{0x0440}, // Case map - 0x0421: []rune{0x0441}, // Case map - 0x0422: []rune{0x0442}, // Case map - 0x0423: []rune{0x0443}, // Case map - 0x0424: []rune{0x0444}, // Case map - 0x0425: []rune{0x0445}, // Case map - 0x0426: []rune{0x0446}, // Case map - 0x0427: []rune{0x0447}, // Case map - 0x0428: []rune{0x0448}, // Case map - 0x0429: []rune{0x0449}, // Case map - 0x042A: []rune{0x044A}, // Case map - 0x042B: []rune{0x044B}, // Case map - 0x042C: []rune{0x044C}, // Case map - 0x042D: []rune{0x044D}, // Case map - 0x042E: []rune{0x044E}, // Case map - 0x042F: []rune{0x044F}, // Case map - 0x0460: []rune{0x0461}, // Case map - 0x0462: []rune{0x0463}, // Case map - 0x0464: []rune{0x0465}, // Case map - 0x0466: []rune{0x0467}, // Case map - 0x0468: []rune{0x0469}, // Case map - 0x046A: []rune{0x046B}, // Case map - 0x046C: []rune{0x046D}, // Case map - 0x046E: []rune{0x046F}, // Case map - 0x0470: []rune{0x0471}, // Case map - 0x0472: []rune{0x0473}, // Case map - 0x0474: []rune{0x0475}, // Case map - 0x0476: []rune{0x0477}, // Case map - 0x0478: []rune{0x0479}, // Case map - 0x047A: []rune{0x047B}, // Case map - 0x047C: []rune{0x047D}, // Case map - 0x047E: []rune{0x047F}, // Case map - 0x0480: []rune{0x0481}, // Case map - 0x048A: []rune{0x048B}, // Case map - 0x048C: []rune{0x048D}, // Case map - 0x048E: []rune{0x048F}, // Case map - 0x0490: []rune{0x0491}, // Case map - 0x0492: []rune{0x0493}, // Case map - 0x0494: []rune{0x0495}, // Case map - 0x0496: []rune{0x0497}, // Case map - 0x0498: []rune{0x0499}, // Case map - 0x049A: []rune{0x049B}, // Case map - 0x049C: []rune{0x049D}, // Case map - 0x049E: []rune{0x049F}, // Case map - 0x04A0: []rune{0x04A1}, // Case map - 0x04A2: []rune{0x04A3}, // Case map - 0x04A4: []rune{0x04A5}, // Case map - 0x04A6: []rune{0x04A7}, // Case map - 0x04A8: []rune{0x04A9}, // Case map - 0x04AA: []rune{0x04AB}, // Case map - 0x04AC: []rune{0x04AD}, // Case map - 0x04AE: []rune{0x04AF}, 
// Case map - 0x04B0: []rune{0x04B1}, // Case map - 0x04B2: []rune{0x04B3}, // Case map - 0x04B4: []rune{0x04B5}, // Case map - 0x04B6: []rune{0x04B7}, // Case map - 0x04B8: []rune{0x04B9}, // Case map - 0x04BA: []rune{0x04BB}, // Case map - 0x04BC: []rune{0x04BD}, // Case map - 0x04BE: []rune{0x04BF}, // Case map - 0x04C1: []rune{0x04C2}, // Case map - 0x04C3: []rune{0x04C4}, // Case map - 0x04C5: []rune{0x04C6}, // Case map - 0x04C7: []rune{0x04C8}, // Case map - 0x04C9: []rune{0x04CA}, // Case map - 0x04CB: []rune{0x04CC}, // Case map - 0x04CD: []rune{0x04CE}, // Case map - 0x04D0: []rune{0x04D1}, // Case map - 0x04D2: []rune{0x04D3}, // Case map - 0x04D4: []rune{0x04D5}, // Case map - 0x04D6: []rune{0x04D7}, // Case map - 0x04D8: []rune{0x04D9}, // Case map - 0x04DA: []rune{0x04DB}, // Case map - 0x04DC: []rune{0x04DD}, // Case map - 0x04DE: []rune{0x04DF}, // Case map - 0x04E0: []rune{0x04E1}, // Case map - 0x04E2: []rune{0x04E3}, // Case map - 0x04E4: []rune{0x04E5}, // Case map - 0x04E6: []rune{0x04E7}, // Case map - 0x04E8: []rune{0x04E9}, // Case map - 0x04EA: []rune{0x04EB}, // Case map - 0x04EC: []rune{0x04ED}, // Case map - 0x04EE: []rune{0x04EF}, // Case map - 0x04F0: []rune{0x04F1}, // Case map - 0x04F2: []rune{0x04F3}, // Case map - 0x04F4: []rune{0x04F5}, // Case map - 0x04F8: []rune{0x04F9}, // Case map - 0x0500: []rune{0x0501}, // Case map - 0x0502: []rune{0x0503}, // Case map - 0x0504: []rune{0x0505}, // Case map - 0x0506: []rune{0x0507}, // Case map - 0x0508: []rune{0x0509}, // Case map - 0x050A: []rune{0x050B}, // Case map - 0x050C: []rune{0x050D}, // Case map - 0x050E: []rune{0x050F}, // Case map - 0x0531: []rune{0x0561}, // Case map - 0x0532: []rune{0x0562}, // Case map - 0x0533: []rune{0x0563}, // Case map - 0x0534: []rune{0x0564}, // Case map - 0x0535: []rune{0x0565}, // Case map - 0x0536: []rune{0x0566}, // Case map - 0x0537: []rune{0x0567}, // Case map - 0x0538: []rune{0x0568}, // Case map - 0x0539: []rune{0x0569}, // Case map - 0x053A: 
[]rune{0x056A}, // Case map - 0x053B: []rune{0x056B}, // Case map - 0x053C: []rune{0x056C}, // Case map - 0x053D: []rune{0x056D}, // Case map - 0x053E: []rune{0x056E}, // Case map - 0x053F: []rune{0x056F}, // Case map - 0x0540: []rune{0x0570}, // Case map - 0x0541: []rune{0x0571}, // Case map - 0x0542: []rune{0x0572}, // Case map - 0x0543: []rune{0x0573}, // Case map - 0x0544: []rune{0x0574}, // Case map - 0x0545: []rune{0x0575}, // Case map - 0x0546: []rune{0x0576}, // Case map - 0x0547: []rune{0x0577}, // Case map - 0x0548: []rune{0x0578}, // Case map - 0x0549: []rune{0x0579}, // Case map - 0x054A: []rune{0x057A}, // Case map - 0x054B: []rune{0x057B}, // Case map - 0x054C: []rune{0x057C}, // Case map - 0x054D: []rune{0x057D}, // Case map - 0x054E: []rune{0x057E}, // Case map - 0x054F: []rune{0x057F}, // Case map - 0x0550: []rune{0x0580}, // Case map - 0x0551: []rune{0x0581}, // Case map - 0x0552: []rune{0x0582}, // Case map - 0x0553: []rune{0x0583}, // Case map - 0x0554: []rune{0x0584}, // Case map - 0x0555: []rune{0x0585}, // Case map - 0x0556: []rune{0x0586}, // Case map - 0x0587: []rune{0x0565, 0x0582}, // Case map - 0x1E00: []rune{0x1E01}, // Case map - 0x1E02: []rune{0x1E03}, // Case map - 0x1E04: []rune{0x1E05}, // Case map - 0x1E06: []rune{0x1E07}, // Case map - 0x1E08: []rune{0x1E09}, // Case map - 0x1E0A: []rune{0x1E0B}, // Case map - 0x1E0C: []rune{0x1E0D}, // Case map - 0x1E0E: []rune{0x1E0F}, // Case map - 0x1E10: []rune{0x1E11}, // Case map - 0x1E12: []rune{0x1E13}, // Case map - 0x1E14: []rune{0x1E15}, // Case map - 0x1E16: []rune{0x1E17}, // Case map - 0x1E18: []rune{0x1E19}, // Case map - 0x1E1A: []rune{0x1E1B}, // Case map - 0x1E1C: []rune{0x1E1D}, // Case map - 0x1E1E: []rune{0x1E1F}, // Case map - 0x1E20: []rune{0x1E21}, // Case map - 0x1E22: []rune{0x1E23}, // Case map - 0x1E24: []rune{0x1E25}, // Case map - 0x1E26: []rune{0x1E27}, // Case map - 0x1E28: []rune{0x1E29}, // Case map - 0x1E2A: []rune{0x1E2B}, // Case map - 0x1E2C: []rune{0x1E2D}, 
// Case map - 0x1E2E: []rune{0x1E2F}, // Case map - 0x1E30: []rune{0x1E31}, // Case map - 0x1E32: []rune{0x1E33}, // Case map - 0x1E34: []rune{0x1E35}, // Case map - 0x1E36: []rune{0x1E37}, // Case map - 0x1E38: []rune{0x1E39}, // Case map - 0x1E3A: []rune{0x1E3B}, // Case map - 0x1E3C: []rune{0x1E3D}, // Case map - 0x1E3E: []rune{0x1E3F}, // Case map - 0x1E40: []rune{0x1E41}, // Case map - 0x1E42: []rune{0x1E43}, // Case map - 0x1E44: []rune{0x1E45}, // Case map - 0x1E46: []rune{0x1E47}, // Case map - 0x1E48: []rune{0x1E49}, // Case map - 0x1E4A: []rune{0x1E4B}, // Case map - 0x1E4C: []rune{0x1E4D}, // Case map - 0x1E4E: []rune{0x1E4F}, // Case map - 0x1E50: []rune{0x1E51}, // Case map - 0x1E52: []rune{0x1E53}, // Case map - 0x1E54: []rune{0x1E55}, // Case map - 0x1E56: []rune{0x1E57}, // Case map - 0x1E58: []rune{0x1E59}, // Case map - 0x1E5A: []rune{0x1E5B}, // Case map - 0x1E5C: []rune{0x1E5D}, // Case map - 0x1E5E: []rune{0x1E5F}, // Case map - 0x1E60: []rune{0x1E61}, // Case map - 0x1E62: []rune{0x1E63}, // Case map - 0x1E64: []rune{0x1E65}, // Case map - 0x1E66: []rune{0x1E67}, // Case map - 0x1E68: []rune{0x1E69}, // Case map - 0x1E6A: []rune{0x1E6B}, // Case map - 0x1E6C: []rune{0x1E6D}, // Case map - 0x1E6E: []rune{0x1E6F}, // Case map - 0x1E70: []rune{0x1E71}, // Case map - 0x1E72: []rune{0x1E73}, // Case map - 0x1E74: []rune{0x1E75}, // Case map - 0x1E76: []rune{0x1E77}, // Case map - 0x1E78: []rune{0x1E79}, // Case map - 0x1E7A: []rune{0x1E7B}, // Case map - 0x1E7C: []rune{0x1E7D}, // Case map - 0x1E7E: []rune{0x1E7F}, // Case map - 0x1E80: []rune{0x1E81}, // Case map - 0x1E82: []rune{0x1E83}, // Case map - 0x1E84: []rune{0x1E85}, // Case map - 0x1E86: []rune{0x1E87}, // Case map - 0x1E88: []rune{0x1E89}, // Case map - 0x1E8A: []rune{0x1E8B}, // Case map - 0x1E8C: []rune{0x1E8D}, // Case map - 0x1E8E: []rune{0x1E8F}, // Case map - 0x1E90: []rune{0x1E91}, // Case map - 0x1E92: []rune{0x1E93}, // Case map - 0x1E94: []rune{0x1E95}, // Case map - 0x1E96: 
[]rune{0x0068, 0x0331}, // Case map - 0x1E97: []rune{0x0074, 0x0308}, // Case map - 0x1E98: []rune{0x0077, 0x030A}, // Case map - 0x1E99: []rune{0x0079, 0x030A}, // Case map - 0x1E9A: []rune{0x0061, 0x02BE}, // Case map - 0x1E9B: []rune{0x1E61}, // Case map - 0x1EA0: []rune{0x1EA1}, // Case map - 0x1EA2: []rune{0x1EA3}, // Case map - 0x1EA4: []rune{0x1EA5}, // Case map - 0x1EA6: []rune{0x1EA7}, // Case map - 0x1EA8: []rune{0x1EA9}, // Case map - 0x1EAA: []rune{0x1EAB}, // Case map - 0x1EAC: []rune{0x1EAD}, // Case map - 0x1EAE: []rune{0x1EAF}, // Case map - 0x1EB0: []rune{0x1EB1}, // Case map - 0x1EB2: []rune{0x1EB3}, // Case map - 0x1EB4: []rune{0x1EB5}, // Case map - 0x1EB6: []rune{0x1EB7}, // Case map - 0x1EB8: []rune{0x1EB9}, // Case map - 0x1EBA: []rune{0x1EBB}, // Case map - 0x1EBC: []rune{0x1EBD}, // Case map - 0x1EBE: []rune{0x1EBF}, // Case map - 0x1EC0: []rune{0x1EC1}, // Case map - 0x1EC2: []rune{0x1EC3}, // Case map - 0x1EC4: []rune{0x1EC5}, // Case map - 0x1EC6: []rune{0x1EC7}, // Case map - 0x1EC8: []rune{0x1EC9}, // Case map - 0x1ECA: []rune{0x1ECB}, // Case map - 0x1ECC: []rune{0x1ECD}, // Case map - 0x1ECE: []rune{0x1ECF}, // Case map - 0x1ED0: []rune{0x1ED1}, // Case map - 0x1ED2: []rune{0x1ED3}, // Case map - 0x1ED4: []rune{0x1ED5}, // Case map - 0x1ED6: []rune{0x1ED7}, // Case map - 0x1ED8: []rune{0x1ED9}, // Case map - 0x1EDA: []rune{0x1EDB}, // Case map - 0x1EDC: []rune{0x1EDD}, // Case map - 0x1EDE: []rune{0x1EDF}, // Case map - 0x1EE0: []rune{0x1EE1}, // Case map - 0x1EE2: []rune{0x1EE3}, // Case map - 0x1EE4: []rune{0x1EE5}, // Case map - 0x1EE6: []rune{0x1EE7}, // Case map - 0x1EE8: []rune{0x1EE9}, // Case map - 0x1EEA: []rune{0x1EEB}, // Case map - 0x1EEC: []rune{0x1EED}, // Case map - 0x1EEE: []rune{0x1EEF}, // Case map - 0x1EF0: []rune{0x1EF1}, // Case map - 0x1EF2: []rune{0x1EF3}, // Case map - 0x1EF4: []rune{0x1EF5}, // Case map - 0x1EF6: []rune{0x1EF7}, // Case map - 0x1EF8: []rune{0x1EF9}, // Case map - 0x1F08: []rune{0x1F00}, // 
Case map - 0x1F09: []rune{0x1F01}, // Case map - 0x1F0A: []rune{0x1F02}, // Case map - 0x1F0B: []rune{0x1F03}, // Case map - 0x1F0C: []rune{0x1F04}, // Case map - 0x1F0D: []rune{0x1F05}, // Case map - 0x1F0E: []rune{0x1F06}, // Case map - 0x1F0F: []rune{0x1F07}, // Case map - 0x1F18: []rune{0x1F10}, // Case map - 0x1F19: []rune{0x1F11}, // Case map - 0x1F1A: []rune{0x1F12}, // Case map - 0x1F1B: []rune{0x1F13}, // Case map - 0x1F1C: []rune{0x1F14}, // Case map - 0x1F1D: []rune{0x1F15}, // Case map - 0x1F28: []rune{0x1F20}, // Case map - 0x1F29: []rune{0x1F21}, // Case map - 0x1F2A: []rune{0x1F22}, // Case map - 0x1F2B: []rune{0x1F23}, // Case map - 0x1F2C: []rune{0x1F24}, // Case map - 0x1F2D: []rune{0x1F25}, // Case map - 0x1F2E: []rune{0x1F26}, // Case map - 0x1F2F: []rune{0x1F27}, // Case map - 0x1F38: []rune{0x1F30}, // Case map - 0x1F39: []rune{0x1F31}, // Case map - 0x1F3A: []rune{0x1F32}, // Case map - 0x1F3B: []rune{0x1F33}, // Case map - 0x1F3C: []rune{0x1F34}, // Case map - 0x1F3D: []rune{0x1F35}, // Case map - 0x1F3E: []rune{0x1F36}, // Case map - 0x1F3F: []rune{0x1F37}, // Case map - 0x1F48: []rune{0x1F40}, // Case map - 0x1F49: []rune{0x1F41}, // Case map - 0x1F4A: []rune{0x1F42}, // Case map - 0x1F4B: []rune{0x1F43}, // Case map - 0x1F4C: []rune{0x1F44}, // Case map - 0x1F4D: []rune{0x1F45}, // Case map - 0x1F50: []rune{0x03C5, 0x0313}, // Case map - 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map - 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map - 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map - 0x1F59: []rune{0x1F51}, // Case map - 0x1F5B: []rune{0x1F53}, // Case map - 0x1F5D: []rune{0x1F55}, // Case map - 0x1F5F: []rune{0x1F57}, // Case map - 0x1F68: []rune{0x1F60}, // Case map - 0x1F69: []rune{0x1F61}, // Case map - 0x1F6A: []rune{0x1F62}, // Case map - 0x1F6B: []rune{0x1F63}, // Case map - 0x1F6C: []rune{0x1F64}, // Case map - 0x1F6D: []rune{0x1F65}, // Case map - 0x1F6E: []rune{0x1F66}, // Case map - 0x1F6F: []rune{0x1F67}, // Case 
map - 0x1F80: []rune{0x1F00, 0x03B9}, // Case map - 0x1F81: []rune{0x1F01, 0x03B9}, // Case map - 0x1F82: []rune{0x1F02, 0x03B9}, // Case map - 0x1F83: []rune{0x1F03, 0x03B9}, // Case map - 0x1F84: []rune{0x1F04, 0x03B9}, // Case map - 0x1F85: []rune{0x1F05, 0x03B9}, // Case map - 0x1F86: []rune{0x1F06, 0x03B9}, // Case map - 0x1F87: []rune{0x1F07, 0x03B9}, // Case map - 0x1F88: []rune{0x1F00, 0x03B9}, // Case map - 0x1F89: []rune{0x1F01, 0x03B9}, // Case map - 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map - 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map - 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map - 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map - 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map - 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map - 0x1F90: []rune{0x1F20, 0x03B9}, // Case map - 0x1F91: []rune{0x1F21, 0x03B9}, // Case map - 0x1F92: []rune{0x1F22, 0x03B9}, // Case map - 0x1F93: []rune{0x1F23, 0x03B9}, // Case map - 0x1F94: []rune{0x1F24, 0x03B9}, // Case map - 0x1F95: []rune{0x1F25, 0x03B9}, // Case map - 0x1F96: []rune{0x1F26, 0x03B9}, // Case map - 0x1F97: []rune{0x1F27, 0x03B9}, // Case map - 0x1F98: []rune{0x1F20, 0x03B9}, // Case map - 0x1F99: []rune{0x1F21, 0x03B9}, // Case map - 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map - 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map - 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map - 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map - 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map - 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map - 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map - 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map - 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map - 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map - 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map - 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map - 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map - 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map - 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map - 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map - 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map - 0x1FAB: 
[]rune{0x1F63, 0x03B9}, // Case map - 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map - 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map - 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map - 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map - 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map - 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map - 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map - 0x1FB6: []rune{0x03B1, 0x0342}, // Case map - 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map - 0x1FB8: []rune{0x1FB0}, // Case map - 0x1FB9: []rune{0x1FB1}, // Case map - 0x1FBA: []rune{0x1F70}, // Case map - 0x1FBB: []rune{0x1F71}, // Case map - 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map - 0x1FBE: []rune{0x03B9}, // Case map - 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map - 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map - 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map - 0x1FC6: []rune{0x03B7, 0x0342}, // Case map - 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map - 0x1FC8: []rune{0x1F72}, // Case map - 0x1FC9: []rune{0x1F73}, // Case map - 0x1FCA: []rune{0x1F74}, // Case map - 0x1FCB: []rune{0x1F75}, // Case map - 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map - 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map - 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map - 0x1FD6: []rune{0x03B9, 0x0342}, // Case map - 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map - 0x1FD8: []rune{0x1FD0}, // Case map - 0x1FD9: []rune{0x1FD1}, // Case map - 0x1FDA: []rune{0x1F76}, // Case map - 0x1FDB: []rune{0x1F77}, // Case map - 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map - 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map - 0x1FE4: []rune{0x03C1, 0x0313}, // Case map - 0x1FE6: []rune{0x03C5, 0x0342}, // Case map - 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map - 0x1FE8: []rune{0x1FE0}, // Case map - 0x1FE9: []rune{0x1FE1}, // Case map - 0x1FEA: []rune{0x1F7A}, // Case map - 0x1FEB: []rune{0x1F7B}, // Case map - 0x1FEC: []rune{0x1FE5}, // Case map - 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map - 0x1FF3: 
[]rune{0x03C9, 0x03B9}, // Case map - 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map - 0x1FF6: []rune{0x03C9, 0x0342}, // Case map - 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map - 0x1FF8: []rune{0x1F78}, // Case map - 0x1FF9: []rune{0x1F79}, // Case map - 0x1FFA: []rune{0x1F7C}, // Case map - 0x1FFB: []rune{0x1F7D}, // Case map - 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map - 0x20A8: []rune{0x0072, 0x0073}, // Additional folding - 0x2102: []rune{0x0063}, // Additional folding - 0x2103: []rune{0x00B0, 0x0063}, // Additional folding - 0x2107: []rune{0x025B}, // Additional folding - 0x2109: []rune{0x00B0, 0x0066}, // Additional folding - 0x210B: []rune{0x0068}, // Additional folding - 0x210C: []rune{0x0068}, // Additional folding - 0x210D: []rune{0x0068}, // Additional folding - 0x2110: []rune{0x0069}, // Additional folding - 0x2111: []rune{0x0069}, // Additional folding - 0x2112: []rune{0x006C}, // Additional folding - 0x2115: []rune{0x006E}, // Additional folding - 0x2116: []rune{0x006E, 0x006F}, // Additional folding - 0x2119: []rune{0x0070}, // Additional folding - 0x211A: []rune{0x0071}, // Additional folding - 0x211B: []rune{0x0072}, // Additional folding - 0x211C: []rune{0x0072}, // Additional folding - 0x211D: []rune{0x0072}, // Additional folding - 0x2120: []rune{0x0073, 0x006D}, // Additional folding - 0x2121: []rune{0x0074, 0x0065, 0x006C}, // Additional folding - 0x2122: []rune{0x0074, 0x006D}, // Additional folding - 0x2124: []rune{0x007A}, // Additional folding - 0x2126: []rune{0x03C9}, // Case map - 0x2128: []rune{0x007A}, // Additional folding - 0x212A: []rune{0x006B}, // Case map - 0x212B: []rune{0x00E5}, // Case map - 0x212C: []rune{0x0062}, // Additional folding - 0x212D: []rune{0x0063}, // Additional folding - 0x2130: []rune{0x0065}, // Additional folding - 0x2131: []rune{0x0066}, // Additional folding - 0x2133: []rune{0x006D}, // Additional folding - 0x213E: []rune{0x03B3}, // Additional folding - 0x213F: []rune{0x03C0}, // Additional folding - 
0x2145: []rune{0x0064}, // Additional folding - 0x2160: []rune{0x2170}, // Case map - 0x2161: []rune{0x2171}, // Case map - 0x2162: []rune{0x2172}, // Case map - 0x2163: []rune{0x2173}, // Case map - 0x2164: []rune{0x2174}, // Case map - 0x2165: []rune{0x2175}, // Case map - 0x2166: []rune{0x2176}, // Case map - 0x2167: []rune{0x2177}, // Case map - 0x2168: []rune{0x2178}, // Case map - 0x2169: []rune{0x2179}, // Case map - 0x216A: []rune{0x217A}, // Case map - 0x216B: []rune{0x217B}, // Case map - 0x216C: []rune{0x217C}, // Case map - 0x216D: []rune{0x217D}, // Case map - 0x216E: []rune{0x217E}, // Case map - 0x216F: []rune{0x217F}, // Case map - 0x24B6: []rune{0x24D0}, // Case map - 0x24B7: []rune{0x24D1}, // Case map - 0x24B8: []rune{0x24D2}, // Case map - 0x24B9: []rune{0x24D3}, // Case map - 0x24BA: []rune{0x24D4}, // Case map - 0x24BB: []rune{0x24D5}, // Case map - 0x24BC: []rune{0x24D6}, // Case map - 0x24BD: []rune{0x24D7}, // Case map - 0x24BE: []rune{0x24D8}, // Case map - 0x24BF: []rune{0x24D9}, // Case map - 0x24C0: []rune{0x24DA}, // Case map - 0x24C1: []rune{0x24DB}, // Case map - 0x24C2: []rune{0x24DC}, // Case map - 0x24C3: []rune{0x24DD}, // Case map - 0x24C4: []rune{0x24DE}, // Case map - 0x24C5: []rune{0x24DF}, // Case map - 0x24C6: []rune{0x24E0}, // Case map - 0x24C7: []rune{0x24E1}, // Case map - 0x24C8: []rune{0x24E2}, // Case map - 0x24C9: []rune{0x24E3}, // Case map - 0x24CA: []rune{0x24E4}, // Case map - 0x24CB: []rune{0x24E5}, // Case map - 0x24CC: []rune{0x24E6}, // Case map - 0x24CD: []rune{0x24E7}, // Case map - 0x24CE: []rune{0x24E8}, // Case map - 0x24CF: []rune{0x24E9}, // Case map - 0x3371: []rune{0x0068, 0x0070, 0x0061}, // Additional folding - 0x3373: []rune{0x0061, 0x0075}, // Additional folding - 0x3375: []rune{0x006F, 0x0076}, // Additional folding - 0x3380: []rune{0x0070, 0x0061}, // Additional folding - 0x3381: []rune{0x006E, 0x0061}, // Additional folding - 0x3382: []rune{0x03BC, 0x0061}, // Additional folding - 0x3383: 
[]rune{0x006D, 0x0061}, // Additional folding - 0x3384: []rune{0x006B, 0x0061}, // Additional folding - 0x3385: []rune{0x006B, 0x0062}, // Additional folding - 0x3386: []rune{0x006D, 0x0062}, // Additional folding - 0x3387: []rune{0x0067, 0x0062}, // Additional folding - 0x338A: []rune{0x0070, 0x0066}, // Additional folding - 0x338B: []rune{0x006E, 0x0066}, // Additional folding - 0x338C: []rune{0x03BC, 0x0066}, // Additional folding - 0x3390: []rune{0x0068, 0x007A}, // Additional folding - 0x3391: []rune{0x006B, 0x0068, 0x007A}, // Additional folding - 0x3392: []rune{0x006D, 0x0068, 0x007A}, // Additional folding - 0x3393: []rune{0x0067, 0x0068, 0x007A}, // Additional folding - 0x3394: []rune{0x0074, 0x0068, 0x007A}, // Additional folding - 0x33A9: []rune{0x0070, 0x0061}, // Additional folding - 0x33AA: []rune{0x006B, 0x0070, 0x0061}, // Additional folding - 0x33AB: []rune{0x006D, 0x0070, 0x0061}, // Additional folding - 0x33AC: []rune{0x0067, 0x0070, 0x0061}, // Additional folding - 0x33B4: []rune{0x0070, 0x0076}, // Additional folding - 0x33B5: []rune{0x006E, 0x0076}, // Additional folding - 0x33B6: []rune{0x03BC, 0x0076}, // Additional folding - 0x33B7: []rune{0x006D, 0x0076}, // Additional folding - 0x33B8: []rune{0x006B, 0x0076}, // Additional folding - 0x33B9: []rune{0x006D, 0x0076}, // Additional folding - 0x33BA: []rune{0x0070, 0x0077}, // Additional folding - 0x33BB: []rune{0x006E, 0x0077}, // Additional folding - 0x33BC: []rune{0x03BC, 0x0077}, // Additional folding - 0x33BD: []rune{0x006D, 0x0077}, // Additional folding - 0x33BE: []rune{0x006B, 0x0077}, // Additional folding - 0x33BF: []rune{0x006D, 0x0077}, // Additional folding - 0x33C0: []rune{0x006B, 0x03C9}, // Additional folding - 0x33C1: []rune{0x006D, 0x03C9}, // Additional folding - 0x33C3: []rune{0x0062, 0x0071}, // Additional folding - 0x33C6: []rune{0x0063, 0x2215, 0x006B, 0x0067}, // Additional folding - 0x33C7: []rune{0x0063, 0x006F, 0x002E}, // Additional folding - 0x33C8: []rune{0x0064, 
0x0062}, // Additional folding - 0x33C9: []rune{0x0067, 0x0079}, // Additional folding - 0x33CB: []rune{0x0068, 0x0070}, // Additional folding - 0x33CD: []rune{0x006B, 0x006B}, // Additional folding - 0x33CE: []rune{0x006B, 0x006D}, // Additional folding - 0x33D7: []rune{0x0070, 0x0068}, // Additional folding - 0x33D9: []rune{0x0070, 0x0070, 0x006D}, // Additional folding - 0x33DA: []rune{0x0070, 0x0072}, // Additional folding - 0x33DC: []rune{0x0073, 0x0076}, // Additional folding - 0x33DD: []rune{0x0077, 0x0062}, // Additional folding - 0xFB00: []rune{0x0066, 0x0066}, // Case map - 0xFB01: []rune{0x0066, 0x0069}, // Case map - 0xFB02: []rune{0x0066, 0x006C}, // Case map - 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map - 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map - 0xFB05: []rune{0x0073, 0x0074}, // Case map - 0xFB06: []rune{0x0073, 0x0074}, // Case map - 0xFB13: []rune{0x0574, 0x0576}, // Case map - 0xFB14: []rune{0x0574, 0x0565}, // Case map - 0xFB15: []rune{0x0574, 0x056B}, // Case map - 0xFB16: []rune{0x057E, 0x0576}, // Case map - 0xFB17: []rune{0x0574, 0x056D}, // Case map - 0xFF21: []rune{0xFF41}, // Case map - 0xFF22: []rune{0xFF42}, // Case map - 0xFF23: []rune{0xFF43}, // Case map - 0xFF24: []rune{0xFF44}, // Case map - 0xFF25: []rune{0xFF45}, // Case map - 0xFF26: []rune{0xFF46}, // Case map - 0xFF27: []rune{0xFF47}, // Case map - 0xFF28: []rune{0xFF48}, // Case map - 0xFF29: []rune{0xFF49}, // Case map - 0xFF2A: []rune{0xFF4A}, // Case map - 0xFF2B: []rune{0xFF4B}, // Case map - 0xFF2C: []rune{0xFF4C}, // Case map - 0xFF2D: []rune{0xFF4D}, // Case map - 0xFF2E: []rune{0xFF4E}, // Case map - 0xFF2F: []rune{0xFF4F}, // Case map - 0xFF30: []rune{0xFF50}, // Case map - 0xFF31: []rune{0xFF51}, // Case map - 0xFF32: []rune{0xFF52}, // Case map - 0xFF33: []rune{0xFF53}, // Case map - 0xFF34: []rune{0xFF54}, // Case map - 0xFF35: []rune{0xFF55}, // Case map - 0xFF36: []rune{0xFF56}, // Case map - 0xFF37: []rune{0xFF57}, // Case map - 0xFF38: 
[]rune{0xFF58}, // Case map - 0xFF39: []rune{0xFF59}, // Case map - 0xFF3A: []rune{0xFF5A}, // Case map - 0x10400: []rune{0x10428}, // Case map - 0x10401: []rune{0x10429}, // Case map - 0x10402: []rune{0x1042A}, // Case map - 0x10403: []rune{0x1042B}, // Case map - 0x10404: []rune{0x1042C}, // Case map - 0x10405: []rune{0x1042D}, // Case map - 0x10406: []rune{0x1042E}, // Case map - 0x10407: []rune{0x1042F}, // Case map - 0x10408: []rune{0x10430}, // Case map - 0x10409: []rune{0x10431}, // Case map - 0x1040A: []rune{0x10432}, // Case map - 0x1040B: []rune{0x10433}, // Case map - 0x1040C: []rune{0x10434}, // Case map - 0x1040D: []rune{0x10435}, // Case map - 0x1040E: []rune{0x10436}, // Case map - 0x1040F: []rune{0x10437}, // Case map - 0x10410: []rune{0x10438}, // Case map - 0x10411: []rune{0x10439}, // Case map - 0x10412: []rune{0x1043A}, // Case map - 0x10413: []rune{0x1043B}, // Case map - 0x10414: []rune{0x1043C}, // Case map - 0x10415: []rune{0x1043D}, // Case map - 0x10416: []rune{0x1043E}, // Case map - 0x10417: []rune{0x1043F}, // Case map - 0x10418: []rune{0x10440}, // Case map - 0x10419: []rune{0x10441}, // Case map - 0x1041A: []rune{0x10442}, // Case map - 0x1041B: []rune{0x10443}, // Case map - 0x1041C: []rune{0x10444}, // Case map - 0x1041D: []rune{0x10445}, // Case map - 0x1041E: []rune{0x10446}, // Case map - 0x1041F: []rune{0x10447}, // Case map - 0x10420: []rune{0x10448}, // Case map - 0x10421: []rune{0x10449}, // Case map - 0x10422: []rune{0x1044A}, // Case map - 0x10423: []rune{0x1044B}, // Case map - 0x10424: []rune{0x1044C}, // Case map - 0x10425: []rune{0x1044D}, // Case map - 0x1D400: []rune{0x0061}, // Additional folding - 0x1D401: []rune{0x0062}, // Additional folding - 0x1D402: []rune{0x0063}, // Additional folding - 0x1D403: []rune{0x0064}, // Additional folding - 0x1D404: []rune{0x0065}, // Additional folding - 0x1D405: []rune{0x0066}, // Additional folding - 0x1D406: []rune{0x0067}, // Additional folding - 0x1D407: []rune{0x0068}, // 
Additional folding - 0x1D408: []rune{0x0069}, // Additional folding - 0x1D409: []rune{0x006A}, // Additional folding - 0x1D40A: []rune{0x006B}, // Additional folding - 0x1D40B: []rune{0x006C}, // Additional folding - 0x1D40C: []rune{0x006D}, // Additional folding - 0x1D40D: []rune{0x006E}, // Additional folding - 0x1D40E: []rune{0x006F}, // Additional folding - 0x1D40F: []rune{0x0070}, // Additional folding - 0x1D410: []rune{0x0071}, // Additional folding - 0x1D411: []rune{0x0072}, // Additional folding - 0x1D412: []rune{0x0073}, // Additional folding - 0x1D413: []rune{0x0074}, // Additional folding - 0x1D414: []rune{0x0075}, // Additional folding - 0x1D415: []rune{0x0076}, // Additional folding - 0x1D416: []rune{0x0077}, // Additional folding - 0x1D417: []rune{0x0078}, // Additional folding - 0x1D418: []rune{0x0079}, // Additional folding - 0x1D419: []rune{0x007A}, // Additional folding - 0x1D434: []rune{0x0061}, // Additional folding - 0x1D435: []rune{0x0062}, // Additional folding - 0x1D436: []rune{0x0063}, // Additional folding - 0x1D437: []rune{0x0064}, // Additional folding - 0x1D438: []rune{0x0065}, // Additional folding - 0x1D439: []rune{0x0066}, // Additional folding - 0x1D43A: []rune{0x0067}, // Additional folding - 0x1D43B: []rune{0x0068}, // Additional folding - 0x1D43C: []rune{0x0069}, // Additional folding - 0x1D43D: []rune{0x006A}, // Additional folding - 0x1D43E: []rune{0x006B}, // Additional folding - 0x1D43F: []rune{0x006C}, // Additional folding - 0x1D440: []rune{0x006D}, // Additional folding - 0x1D441: []rune{0x006E}, // Additional folding - 0x1D442: []rune{0x006F}, // Additional folding - 0x1D443: []rune{0x0070}, // Additional folding - 0x1D444: []rune{0x0071}, // Additional folding - 0x1D445: []rune{0x0072}, // Additional folding - 0x1D446: []rune{0x0073}, // Additional folding - 0x1D447: []rune{0x0074}, // Additional folding - 0x1D448: []rune{0x0075}, // Additional folding - 0x1D449: []rune{0x0076}, // Additional folding - 0x1D44A: 
[]rune{0x0077}, // Additional folding - 0x1D44B: []rune{0x0078}, // Additional folding - 0x1D44C: []rune{0x0079}, // Additional folding - 0x1D44D: []rune{0x007A}, // Additional folding - 0x1D468: []rune{0x0061}, // Additional folding - 0x1D469: []rune{0x0062}, // Additional folding - 0x1D46A: []rune{0x0063}, // Additional folding - 0x1D46B: []rune{0x0064}, // Additional folding - 0x1D46C: []rune{0x0065}, // Additional folding - 0x1D46D: []rune{0x0066}, // Additional folding - 0x1D46E: []rune{0x0067}, // Additional folding - 0x1D46F: []rune{0x0068}, // Additional folding - 0x1D470: []rune{0x0069}, // Additional folding - 0x1D471: []rune{0x006A}, // Additional folding - 0x1D472: []rune{0x006B}, // Additional folding - 0x1D473: []rune{0x006C}, // Additional folding - 0x1D474: []rune{0x006D}, // Additional folding - 0x1D475: []rune{0x006E}, // Additional folding - 0x1D476: []rune{0x006F}, // Additional folding - 0x1D477: []rune{0x0070}, // Additional folding - 0x1D478: []rune{0x0071}, // Additional folding - 0x1D479: []rune{0x0072}, // Additional folding - 0x1D47A: []rune{0x0073}, // Additional folding - 0x1D47B: []rune{0x0074}, // Additional folding - 0x1D47C: []rune{0x0075}, // Additional folding - 0x1D47D: []rune{0x0076}, // Additional folding - 0x1D47E: []rune{0x0077}, // Additional folding - 0x1D47F: []rune{0x0078}, // Additional folding - 0x1D480: []rune{0x0079}, // Additional folding - 0x1D481: []rune{0x007A}, // Additional folding - 0x1D49C: []rune{0x0061}, // Additional folding - 0x1D49E: []rune{0x0063}, // Additional folding - 0x1D49F: []rune{0x0064}, // Additional folding - 0x1D4A2: []rune{0x0067}, // Additional folding - 0x1D4A5: []rune{0x006A}, // Additional folding - 0x1D4A6: []rune{0x006B}, // Additional folding - 0x1D4A9: []rune{0x006E}, // Additional folding - 0x1D4AA: []rune{0x006F}, // Additional folding - 0x1D4AB: []rune{0x0070}, // Additional folding - 0x1D4AC: []rune{0x0071}, // Additional folding - 0x1D4AE: []rune{0x0073}, // Additional folding - 
0x1D4AF: []rune{0x0074}, // Additional folding - 0x1D4B0: []rune{0x0075}, // Additional folding - 0x1D4B1: []rune{0x0076}, // Additional folding - 0x1D4B2: []rune{0x0077}, // Additional folding - 0x1D4B3: []rune{0x0078}, // Additional folding - 0x1D4B4: []rune{0x0079}, // Additional folding - 0x1D4B5: []rune{0x007A}, // Additional folding - 0x1D4D0: []rune{0x0061}, // Additional folding - 0x1D4D1: []rune{0x0062}, // Additional folding - 0x1D4D2: []rune{0x0063}, // Additional folding - 0x1D4D3: []rune{0x0064}, // Additional folding - 0x1D4D4: []rune{0x0065}, // Additional folding - 0x1D4D5: []rune{0x0066}, // Additional folding - 0x1D4D6: []rune{0x0067}, // Additional folding - 0x1D4D7: []rune{0x0068}, // Additional folding - 0x1D4D8: []rune{0x0069}, // Additional folding - 0x1D4D9: []rune{0x006A}, // Additional folding - 0x1D4DA: []rune{0x006B}, // Additional folding - 0x1D4DB: []rune{0x006C}, // Additional folding - 0x1D4DC: []rune{0x006D}, // Additional folding - 0x1D4DD: []rune{0x006E}, // Additional folding - 0x1D4DE: []rune{0x006F}, // Additional folding - 0x1D4DF: []rune{0x0070}, // Additional folding - 0x1D4E0: []rune{0x0071}, // Additional folding - 0x1D4E1: []rune{0x0072}, // Additional folding - 0x1D4E2: []rune{0x0073}, // Additional folding - 0x1D4E3: []rune{0x0074}, // Additional folding - 0x1D4E4: []rune{0x0075}, // Additional folding - 0x1D4E5: []rune{0x0076}, // Additional folding - 0x1D4E6: []rune{0x0077}, // Additional folding - 0x1D4E7: []rune{0x0078}, // Additional folding - 0x1D4E8: []rune{0x0079}, // Additional folding - 0x1D4E9: []rune{0x007A}, // Additional folding - 0x1D504: []rune{0x0061}, // Additional folding - 0x1D505: []rune{0x0062}, // Additional folding - 0x1D507: []rune{0x0064}, // Additional folding - 0x1D508: []rune{0x0065}, // Additional folding - 0x1D509: []rune{0x0066}, // Additional folding - 0x1D50A: []rune{0x0067}, // Additional folding - 0x1D50D: []rune{0x006A}, // Additional folding - 0x1D50E: []rune{0x006B}, // Additional 
folding - 0x1D50F: []rune{0x006C}, // Additional folding - 0x1D510: []rune{0x006D}, // Additional folding - 0x1D511: []rune{0x006E}, // Additional folding - 0x1D512: []rune{0x006F}, // Additional folding - 0x1D513: []rune{0x0070}, // Additional folding - 0x1D514: []rune{0x0071}, // Additional folding - 0x1D516: []rune{0x0073}, // Additional folding - 0x1D517: []rune{0x0074}, // Additional folding - 0x1D518: []rune{0x0075}, // Additional folding - 0x1D519: []rune{0x0076}, // Additional folding - 0x1D51A: []rune{0x0077}, // Additional folding - 0x1D51B: []rune{0x0078}, // Additional folding - 0x1D51C: []rune{0x0079}, // Additional folding - 0x1D538: []rune{0x0061}, // Additional folding - 0x1D539: []rune{0x0062}, // Additional folding - 0x1D53B: []rune{0x0064}, // Additional folding - 0x1D53C: []rune{0x0065}, // Additional folding - 0x1D53D: []rune{0x0066}, // Additional folding - 0x1D53E: []rune{0x0067}, // Additional folding - 0x1D540: []rune{0x0069}, // Additional folding - 0x1D541: []rune{0x006A}, // Additional folding - 0x1D542: []rune{0x006B}, // Additional folding - 0x1D543: []rune{0x006C}, // Additional folding - 0x1D544: []rune{0x006D}, // Additional folding - 0x1D546: []rune{0x006F}, // Additional folding - 0x1D54A: []rune{0x0073}, // Additional folding - 0x1D54B: []rune{0x0074}, // Additional folding - 0x1D54C: []rune{0x0075}, // Additional folding - 0x1D54D: []rune{0x0076}, // Additional folding - 0x1D54E: []rune{0x0077}, // Additional folding - 0x1D54F: []rune{0x0078}, // Additional folding - 0x1D550: []rune{0x0079}, // Additional folding - 0x1D56C: []rune{0x0061}, // Additional folding - 0x1D56D: []rune{0x0062}, // Additional folding - 0x1D56E: []rune{0x0063}, // Additional folding - 0x1D56F: []rune{0x0064}, // Additional folding - 0x1D570: []rune{0x0065}, // Additional folding - 0x1D571: []rune{0x0066}, // Additional folding - 0x1D572: []rune{0x0067}, // Additional folding - 0x1D573: []rune{0x0068}, // Additional folding - 0x1D574: []rune{0x0069}, // 
Additional folding - 0x1D575: []rune{0x006A}, // Additional folding - 0x1D576: []rune{0x006B}, // Additional folding - 0x1D577: []rune{0x006C}, // Additional folding - 0x1D578: []rune{0x006D}, // Additional folding - 0x1D579: []rune{0x006E}, // Additional folding - 0x1D57A: []rune{0x006F}, // Additional folding - 0x1D57B: []rune{0x0070}, // Additional folding - 0x1D57C: []rune{0x0071}, // Additional folding - 0x1D57D: []rune{0x0072}, // Additional folding - 0x1D57E: []rune{0x0073}, // Additional folding - 0x1D57F: []rune{0x0074}, // Additional folding - 0x1D580: []rune{0x0075}, // Additional folding - 0x1D581: []rune{0x0076}, // Additional folding - 0x1D582: []rune{0x0077}, // Additional folding - 0x1D583: []rune{0x0078}, // Additional folding - 0x1D584: []rune{0x0079}, // Additional folding - 0x1D585: []rune{0x007A}, // Additional folding - 0x1D5A0: []rune{0x0061}, // Additional folding - 0x1D5A1: []rune{0x0062}, // Additional folding - 0x1D5A2: []rune{0x0063}, // Additional folding - 0x1D5A3: []rune{0x0064}, // Additional folding - 0x1D5A4: []rune{0x0065}, // Additional folding - 0x1D5A5: []rune{0x0066}, // Additional folding - 0x1D5A6: []rune{0x0067}, // Additional folding - 0x1D5A7: []rune{0x0068}, // Additional folding - 0x1D5A8: []rune{0x0069}, // Additional folding - 0x1D5A9: []rune{0x006A}, // Additional folding - 0x1D5AA: []rune{0x006B}, // Additional folding - 0x1D5AB: []rune{0x006C}, // Additional folding - 0x1D5AC: []rune{0x006D}, // Additional folding - 0x1D5AD: []rune{0x006E}, // Additional folding - 0x1D5AE: []rune{0x006F}, // Additional folding - 0x1D5AF: []rune{0x0070}, // Additional folding - 0x1D5B0: []rune{0x0071}, // Additional folding - 0x1D5B1: []rune{0x0072}, // Additional folding - 0x1D5B2: []rune{0x0073}, // Additional folding - 0x1D5B3: []rune{0x0074}, // Additional folding - 0x1D5B4: []rune{0x0075}, // Additional folding - 0x1D5B5: []rune{0x0076}, // Additional folding - 0x1D5B6: []rune{0x0077}, // Additional folding - 0x1D5B7: 
[]rune{0x0078}, // Additional folding - 0x1D5B8: []rune{0x0079}, // Additional folding - 0x1D5B9: []rune{0x007A}, // Additional folding - 0x1D5D4: []rune{0x0061}, // Additional folding - 0x1D5D5: []rune{0x0062}, // Additional folding - 0x1D5D6: []rune{0x0063}, // Additional folding - 0x1D5D7: []rune{0x0064}, // Additional folding - 0x1D5D8: []rune{0x0065}, // Additional folding - 0x1D5D9: []rune{0x0066}, // Additional folding - 0x1D5DA: []rune{0x0067}, // Additional folding - 0x1D5DB: []rune{0x0068}, // Additional folding - 0x1D5DC: []rune{0x0069}, // Additional folding - 0x1D5DD: []rune{0x006A}, // Additional folding - 0x1D5DE: []rune{0x006B}, // Additional folding - 0x1D5DF: []rune{0x006C}, // Additional folding - 0x1D5E0: []rune{0x006D}, // Additional folding - 0x1D5E1: []rune{0x006E}, // Additional folding - 0x1D5E2: []rune{0x006F}, // Additional folding - 0x1D5E3: []rune{0x0070}, // Additional folding - 0x1D5E4: []rune{0x0071}, // Additional folding - 0x1D5E5: []rune{0x0072}, // Additional folding - 0x1D5E6: []rune{0x0073}, // Additional folding - 0x1D5E7: []rune{0x0074}, // Additional folding - 0x1D5E8: []rune{0x0075}, // Additional folding - 0x1D5E9: []rune{0x0076}, // Additional folding - 0x1D5EA: []rune{0x0077}, // Additional folding - 0x1D5EB: []rune{0x0078}, // Additional folding - 0x1D5EC: []rune{0x0079}, // Additional folding - 0x1D5ED: []rune{0x007A}, // Additional folding - 0x1D608: []rune{0x0061}, // Additional folding - 0x1D609: []rune{0x0062}, // Additional folding - 0x1D60A: []rune{0x0063}, // Additional folding - 0x1D60B: []rune{0x0064}, // Additional folding - 0x1D60C: []rune{0x0065}, // Additional folding - 0x1D60D: []rune{0x0066}, // Additional folding - 0x1D60E: []rune{0x0067}, // Additional folding - 0x1D60F: []rune{0x0068}, // Additional folding - 0x1D610: []rune{0x0069}, // Additional folding - 0x1D611: []rune{0x006A}, // Additional folding - 0x1D612: []rune{0x006B}, // Additional folding - 0x1D613: []rune{0x006C}, // Additional folding - 
0x1D614: []rune{0x006D}, // Additional folding - 0x1D615: []rune{0x006E}, // Additional folding - 0x1D616: []rune{0x006F}, // Additional folding - 0x1D617: []rune{0x0070}, // Additional folding - 0x1D618: []rune{0x0071}, // Additional folding - 0x1D619: []rune{0x0072}, // Additional folding - 0x1D61A: []rune{0x0073}, // Additional folding - 0x1D61B: []rune{0x0074}, // Additional folding - 0x1D61C: []rune{0x0075}, // Additional folding - 0x1D61D: []rune{0x0076}, // Additional folding - 0x1D61E: []rune{0x0077}, // Additional folding - 0x1D61F: []rune{0x0078}, // Additional folding - 0x1D620: []rune{0x0079}, // Additional folding - 0x1D621: []rune{0x007A}, // Additional folding - 0x1D63C: []rune{0x0061}, // Additional folding - 0x1D63D: []rune{0x0062}, // Additional folding - 0x1D63E: []rune{0x0063}, // Additional folding - 0x1D63F: []rune{0x0064}, // Additional folding - 0x1D640: []rune{0x0065}, // Additional folding - 0x1D641: []rune{0x0066}, // Additional folding - 0x1D642: []rune{0x0067}, // Additional folding - 0x1D643: []rune{0x0068}, // Additional folding - 0x1D644: []rune{0x0069}, // Additional folding - 0x1D645: []rune{0x006A}, // Additional folding - 0x1D646: []rune{0x006B}, // Additional folding - 0x1D647: []rune{0x006C}, // Additional folding - 0x1D648: []rune{0x006D}, // Additional folding - 0x1D649: []rune{0x006E}, // Additional folding - 0x1D64A: []rune{0x006F}, // Additional folding - 0x1D64B: []rune{0x0070}, // Additional folding - 0x1D64C: []rune{0x0071}, // Additional folding - 0x1D64D: []rune{0x0072}, // Additional folding - 0x1D64E: []rune{0x0073}, // Additional folding - 0x1D64F: []rune{0x0074}, // Additional folding - 0x1D650: []rune{0x0075}, // Additional folding - 0x1D651: []rune{0x0076}, // Additional folding - 0x1D652: []rune{0x0077}, // Additional folding - 0x1D653: []rune{0x0078}, // Additional folding - 0x1D654: []rune{0x0079}, // Additional folding - 0x1D655: []rune{0x007A}, // Additional folding - 0x1D670: []rune{0x0061}, // Additional 
folding - 0x1D671: []rune{0x0062}, // Additional folding - 0x1D672: []rune{0x0063}, // Additional folding - 0x1D673: []rune{0x0064}, // Additional folding - 0x1D674: []rune{0x0065}, // Additional folding - 0x1D675: []rune{0x0066}, // Additional folding - 0x1D676: []rune{0x0067}, // Additional folding - 0x1D677: []rune{0x0068}, // Additional folding - 0x1D678: []rune{0x0069}, // Additional folding - 0x1D679: []rune{0x006A}, // Additional folding - 0x1D67A: []rune{0x006B}, // Additional folding - 0x1D67B: []rune{0x006C}, // Additional folding - 0x1D67C: []rune{0x006D}, // Additional folding - 0x1D67D: []rune{0x006E}, // Additional folding - 0x1D67E: []rune{0x006F}, // Additional folding - 0x1D67F: []rune{0x0070}, // Additional folding - 0x1D680: []rune{0x0071}, // Additional folding - 0x1D681: []rune{0x0072}, // Additional folding - 0x1D682: []rune{0x0073}, // Additional folding - 0x1D683: []rune{0x0074}, // Additional folding - 0x1D684: []rune{0x0075}, // Additional folding - 0x1D685: []rune{0x0076}, // Additional folding - 0x1D686: []rune{0x0077}, // Additional folding - 0x1D687: []rune{0x0078}, // Additional folding - 0x1D688: []rune{0x0079}, // Additional folding - 0x1D689: []rune{0x007A}, // Additional folding - 0x1D6A8: []rune{0x03B1}, // Additional folding - 0x1D6A9: []rune{0x03B2}, // Additional folding - 0x1D6AA: []rune{0x03B3}, // Additional folding - 0x1D6AB: []rune{0x03B4}, // Additional folding - 0x1D6AC: []rune{0x03B5}, // Additional folding - 0x1D6AD: []rune{0x03B6}, // Additional folding - 0x1D6AE: []rune{0x03B7}, // Additional folding - 0x1D6AF: []rune{0x03B8}, // Additional folding - 0x1D6B0: []rune{0x03B9}, // Additional folding - 0x1D6B1: []rune{0x03BA}, // Additional folding - 0x1D6B2: []rune{0x03BB}, // Additional folding - 0x1D6B3: []rune{0x03BC}, // Additional folding - 0x1D6B4: []rune{0x03BD}, // Additional folding - 0x1D6B5: []rune{0x03BE}, // Additional folding - 0x1D6B6: []rune{0x03BF}, // Additional folding - 0x1D6B7: []rune{0x03C0}, // 
Additional folding - 0x1D6B8: []rune{0x03C1}, // Additional folding - 0x1D6B9: []rune{0x03B8}, // Additional folding - 0x1D6BA: []rune{0x03C3}, // Additional folding - 0x1D6BB: []rune{0x03C4}, // Additional folding - 0x1D6BC: []rune{0x03C5}, // Additional folding - 0x1D6BD: []rune{0x03C6}, // Additional folding - 0x1D6BE: []rune{0x03C7}, // Additional folding - 0x1D6BF: []rune{0x03C8}, // Additional folding - 0x1D6C0: []rune{0x03C9}, // Additional folding - 0x1D6D3: []rune{0x03C3}, // Additional folding - 0x1D6E2: []rune{0x03B1}, // Additional folding - 0x1D6E3: []rune{0x03B2}, // Additional folding - 0x1D6E4: []rune{0x03B3}, // Additional folding - 0x1D6E5: []rune{0x03B4}, // Additional folding - 0x1D6E6: []rune{0x03B5}, // Additional folding - 0x1D6E7: []rune{0x03B6}, // Additional folding - 0x1D6E8: []rune{0x03B7}, // Additional folding - 0x1D6E9: []rune{0x03B8}, // Additional folding - 0x1D6EA: []rune{0x03B9}, // Additional folding - 0x1D6EB: []rune{0x03BA}, // Additional folding - 0x1D6EC: []rune{0x03BB}, // Additional folding - 0x1D6ED: []rune{0x03BC}, // Additional folding - 0x1D6EE: []rune{0x03BD}, // Additional folding - 0x1D6EF: []rune{0x03BE}, // Additional folding - 0x1D6F0: []rune{0x03BF}, // Additional folding - 0x1D6F1: []rune{0x03C0}, // Additional folding - 0x1D6F2: []rune{0x03C1}, // Additional folding - 0x1D6F3: []rune{0x03B8}, // Additional folding - 0x1D6F4: []rune{0x03C3}, // Additional folding - 0x1D6F5: []rune{0x03C4}, // Additional folding - 0x1D6F6: []rune{0x03C5}, // Additional folding - 0x1D6F7: []rune{0x03C6}, // Additional folding - 0x1D6F8: []rune{0x03C7}, // Additional folding - 0x1D6F9: []rune{0x03C8}, // Additional folding - 0x1D6FA: []rune{0x03C9}, // Additional folding - 0x1D70D: []rune{0x03C3}, // Additional folding - 0x1D71C: []rune{0x03B1}, // Additional folding - 0x1D71D: []rune{0x03B2}, // Additional folding - 0x1D71E: []rune{0x03B3}, // Additional folding - 0x1D71F: []rune{0x03B4}, // Additional folding - 0x1D720: 
[]rune{0x03B5}, // Additional folding - 0x1D721: []rune{0x03B6}, // Additional folding - 0x1D722: []rune{0x03B7}, // Additional folding - 0x1D723: []rune{0x03B8}, // Additional folding - 0x1D724: []rune{0x03B9}, // Additional folding - 0x1D725: []rune{0x03BA}, // Additional folding - 0x1D726: []rune{0x03BB}, // Additional folding - 0x1D727: []rune{0x03BC}, // Additional folding - 0x1D728: []rune{0x03BD}, // Additional folding - 0x1D729: []rune{0x03BE}, // Additional folding - 0x1D72A: []rune{0x03BF}, // Additional folding - 0x1D72B: []rune{0x03C0}, // Additional folding - 0x1D72C: []rune{0x03C1}, // Additional folding - 0x1D72D: []rune{0x03B8}, // Additional folding - 0x1D72E: []rune{0x03C3}, // Additional folding - 0x1D72F: []rune{0x03C4}, // Additional folding - 0x1D730: []rune{0x03C5}, // Additional folding - 0x1D731: []rune{0x03C6}, // Additional folding - 0x1D732: []rune{0x03C7}, // Additional folding - 0x1D733: []rune{0x03C8}, // Additional folding - 0x1D734: []rune{0x03C9}, // Additional folding - 0x1D747: []rune{0x03C3}, // Additional folding - 0x1D756: []rune{0x03B1}, // Additional folding - 0x1D757: []rune{0x03B2}, // Additional folding - 0x1D758: []rune{0x03B3}, // Additional folding - 0x1D759: []rune{0x03B4}, // Additional folding - 0x1D75A: []rune{0x03B5}, // Additional folding - 0x1D75B: []rune{0x03B6}, // Additional folding - 0x1D75C: []rune{0x03B7}, // Additional folding - 0x1D75D: []rune{0x03B8}, // Additional folding - 0x1D75E: []rune{0x03B9}, // Additional folding - 0x1D75F: []rune{0x03BA}, // Additional folding - 0x1D760: []rune{0x03BB}, // Additional folding - 0x1D761: []rune{0x03BC}, // Additional folding - 0x1D762: []rune{0x03BD}, // Additional folding - 0x1D763: []rune{0x03BE}, // Additional folding - 0x1D764: []rune{0x03BF}, // Additional folding - 0x1D765: []rune{0x03C0}, // Additional folding - 0x1D766: []rune{0x03C1}, // Additional folding - 0x1D767: []rune{0x03B8}, // Additional folding - 0x1D768: []rune{0x03C3}, // Additional folding - 
0x1D769: []rune{0x03C4}, // Additional folding - 0x1D76A: []rune{0x03C5}, // Additional folding - 0x1D76B: []rune{0x03C6}, // Additional folding - 0x1D76C: []rune{0x03C7}, // Additional folding - 0x1D76D: []rune{0x03C8}, // Additional folding - 0x1D76E: []rune{0x03C9}, // Additional folding - 0x1D781: []rune{0x03C3}, // Additional folding - 0x1D790: []rune{0x03B1}, // Additional folding - 0x1D791: []rune{0x03B2}, // Additional folding - 0x1D792: []rune{0x03B3}, // Additional folding - 0x1D793: []rune{0x03B4}, // Additional folding - 0x1D794: []rune{0x03B5}, // Additional folding - 0x1D795: []rune{0x03B6}, // Additional folding - 0x1D796: []rune{0x03B7}, // Additional folding - 0x1D797: []rune{0x03B8}, // Additional folding - 0x1D798: []rune{0x03B9}, // Additional folding - 0x1D799: []rune{0x03BA}, // Additional folding - 0x1D79A: []rune{0x03BB}, // Additional folding - 0x1D79B: []rune{0x03BC}, // Additional folding - 0x1D79C: []rune{0x03BD}, // Additional folding - 0x1D79D: []rune{0x03BE}, // Additional folding - 0x1D79E: []rune{0x03BF}, // Additional folding - 0x1D79F: []rune{0x03C0}, // Additional folding - 0x1D7A0: []rune{0x03C1}, // Additional folding - 0x1D7A1: []rune{0x03B8}, // Additional folding - 0x1D7A2: []rune{0x03C3}, // Additional folding - 0x1D7A3: []rune{0x03C4}, // Additional folding - 0x1D7A4: []rune{0x03C5}, // Additional folding - 0x1D7A5: []rune{0x03C6}, // Additional folding - 0x1D7A6: []rune{0x03C7}, // Additional folding - 0x1D7A7: []rune{0x03C8}, // Additional folding - 0x1D7A8: []rune{0x03C9}, // Additional folding - 0x1D7BB: []rune{0x03C3}, // Additional folding -} - -// TableB2 represents RFC-3454 Table B.2. 
-var TableB2 Mapping = tableB2 - -var tableB3 = Mapping{ - 0x0041: []rune{0x0061}, // Case map - 0x0042: []rune{0x0062}, // Case map - 0x0043: []rune{0x0063}, // Case map - 0x0044: []rune{0x0064}, // Case map - 0x0045: []rune{0x0065}, // Case map - 0x0046: []rune{0x0066}, // Case map - 0x0047: []rune{0x0067}, // Case map - 0x0048: []rune{0x0068}, // Case map - 0x0049: []rune{0x0069}, // Case map - 0x004A: []rune{0x006A}, // Case map - 0x004B: []rune{0x006B}, // Case map - 0x004C: []rune{0x006C}, // Case map - 0x004D: []rune{0x006D}, // Case map - 0x004E: []rune{0x006E}, // Case map - 0x004F: []rune{0x006F}, // Case map - 0x0050: []rune{0x0070}, // Case map - 0x0051: []rune{0x0071}, // Case map - 0x0052: []rune{0x0072}, // Case map - 0x0053: []rune{0x0073}, // Case map - 0x0054: []rune{0x0074}, // Case map - 0x0055: []rune{0x0075}, // Case map - 0x0056: []rune{0x0076}, // Case map - 0x0057: []rune{0x0077}, // Case map - 0x0058: []rune{0x0078}, // Case map - 0x0059: []rune{0x0079}, // Case map - 0x005A: []rune{0x007A}, // Case map - 0x00B5: []rune{0x03BC}, // Case map - 0x00C0: []rune{0x00E0}, // Case map - 0x00C1: []rune{0x00E1}, // Case map - 0x00C2: []rune{0x00E2}, // Case map - 0x00C3: []rune{0x00E3}, // Case map - 0x00C4: []rune{0x00E4}, // Case map - 0x00C5: []rune{0x00E5}, // Case map - 0x00C6: []rune{0x00E6}, // Case map - 0x00C7: []rune{0x00E7}, // Case map - 0x00C8: []rune{0x00E8}, // Case map - 0x00C9: []rune{0x00E9}, // Case map - 0x00CA: []rune{0x00EA}, // Case map - 0x00CB: []rune{0x00EB}, // Case map - 0x00CC: []rune{0x00EC}, // Case map - 0x00CD: []rune{0x00ED}, // Case map - 0x00CE: []rune{0x00EE}, // Case map - 0x00CF: []rune{0x00EF}, // Case map - 0x00D0: []rune{0x00F0}, // Case map - 0x00D1: []rune{0x00F1}, // Case map - 0x00D2: []rune{0x00F2}, // Case map - 0x00D3: []rune{0x00F3}, // Case map - 0x00D4: []rune{0x00F4}, // Case map - 0x00D5: []rune{0x00F5}, // Case map - 0x00D6: []rune{0x00F6}, // Case map - 0x00D8: []rune{0x00F8}, // Case map - 
0x00D9: []rune{0x00F9}, // Case map - 0x00DA: []rune{0x00FA}, // Case map - 0x00DB: []rune{0x00FB}, // Case map - 0x00DC: []rune{0x00FC}, // Case map - 0x00DD: []rune{0x00FD}, // Case map - 0x00DE: []rune{0x00FE}, // Case map - 0x00DF: []rune{0x0073, 0x0073}, // Case map - 0x0100: []rune{0x0101}, // Case map - 0x0102: []rune{0x0103}, // Case map - 0x0104: []rune{0x0105}, // Case map - 0x0106: []rune{0x0107}, // Case map - 0x0108: []rune{0x0109}, // Case map - 0x010A: []rune{0x010B}, // Case map - 0x010C: []rune{0x010D}, // Case map - 0x010E: []rune{0x010F}, // Case map - 0x0110: []rune{0x0111}, // Case map - 0x0112: []rune{0x0113}, // Case map - 0x0114: []rune{0x0115}, // Case map - 0x0116: []rune{0x0117}, // Case map - 0x0118: []rune{0x0119}, // Case map - 0x011A: []rune{0x011B}, // Case map - 0x011C: []rune{0x011D}, // Case map - 0x011E: []rune{0x011F}, // Case map - 0x0120: []rune{0x0121}, // Case map - 0x0122: []rune{0x0123}, // Case map - 0x0124: []rune{0x0125}, // Case map - 0x0126: []rune{0x0127}, // Case map - 0x0128: []rune{0x0129}, // Case map - 0x012A: []rune{0x012B}, // Case map - 0x012C: []rune{0x012D}, // Case map - 0x012E: []rune{0x012F}, // Case map - 0x0130: []rune{0x0069, 0x0307}, // Case map - 0x0132: []rune{0x0133}, // Case map - 0x0134: []rune{0x0135}, // Case map - 0x0136: []rune{0x0137}, // Case map - 0x0139: []rune{0x013A}, // Case map - 0x013B: []rune{0x013C}, // Case map - 0x013D: []rune{0x013E}, // Case map - 0x013F: []rune{0x0140}, // Case map - 0x0141: []rune{0x0142}, // Case map - 0x0143: []rune{0x0144}, // Case map - 0x0145: []rune{0x0146}, // Case map - 0x0147: []rune{0x0148}, // Case map - 0x0149: []rune{0x02BC, 0x006E}, // Case map - 0x014A: []rune{0x014B}, // Case map - 0x014C: []rune{0x014D}, // Case map - 0x014E: []rune{0x014F}, // Case map - 0x0150: []rune{0x0151}, // Case map - 0x0152: []rune{0x0153}, // Case map - 0x0154: []rune{0x0155}, // Case map - 0x0156: []rune{0x0157}, // Case map - 0x0158: []rune{0x0159}, // Case map - 
0x015A: []rune{0x015B}, // Case map - 0x015C: []rune{0x015D}, // Case map - 0x015E: []rune{0x015F}, // Case map - 0x0160: []rune{0x0161}, // Case map - 0x0162: []rune{0x0163}, // Case map - 0x0164: []rune{0x0165}, // Case map - 0x0166: []rune{0x0167}, // Case map - 0x0168: []rune{0x0169}, // Case map - 0x016A: []rune{0x016B}, // Case map - 0x016C: []rune{0x016D}, // Case map - 0x016E: []rune{0x016F}, // Case map - 0x0170: []rune{0x0171}, // Case map - 0x0172: []rune{0x0173}, // Case map - 0x0174: []rune{0x0175}, // Case map - 0x0176: []rune{0x0177}, // Case map - 0x0178: []rune{0x00FF}, // Case map - 0x0179: []rune{0x017A}, // Case map - 0x017B: []rune{0x017C}, // Case map - 0x017D: []rune{0x017E}, // Case map - 0x017F: []rune{0x0073}, // Case map - 0x0181: []rune{0x0253}, // Case map - 0x0182: []rune{0x0183}, // Case map - 0x0184: []rune{0x0185}, // Case map - 0x0186: []rune{0x0254}, // Case map - 0x0187: []rune{0x0188}, // Case map - 0x0189: []rune{0x0256}, // Case map - 0x018A: []rune{0x0257}, // Case map - 0x018B: []rune{0x018C}, // Case map - 0x018E: []rune{0x01DD}, // Case map - 0x018F: []rune{0x0259}, // Case map - 0x0190: []rune{0x025B}, // Case map - 0x0191: []rune{0x0192}, // Case map - 0x0193: []rune{0x0260}, // Case map - 0x0194: []rune{0x0263}, // Case map - 0x0196: []rune{0x0269}, // Case map - 0x0197: []rune{0x0268}, // Case map - 0x0198: []rune{0x0199}, // Case map - 0x019C: []rune{0x026F}, // Case map - 0x019D: []rune{0x0272}, // Case map - 0x019F: []rune{0x0275}, // Case map - 0x01A0: []rune{0x01A1}, // Case map - 0x01A2: []rune{0x01A3}, // Case map - 0x01A4: []rune{0x01A5}, // Case map - 0x01A6: []rune{0x0280}, // Case map - 0x01A7: []rune{0x01A8}, // Case map - 0x01A9: []rune{0x0283}, // Case map - 0x01AC: []rune{0x01AD}, // Case map - 0x01AE: []rune{0x0288}, // Case map - 0x01AF: []rune{0x01B0}, // Case map - 0x01B1: []rune{0x028A}, // Case map - 0x01B2: []rune{0x028B}, // Case map - 0x01B3: []rune{0x01B4}, // Case map - 0x01B5: []rune{0x01B6}, 
// Case map - 0x01B7: []rune{0x0292}, // Case map - 0x01B8: []rune{0x01B9}, // Case map - 0x01BC: []rune{0x01BD}, // Case map - 0x01C4: []rune{0x01C6}, // Case map - 0x01C5: []rune{0x01C6}, // Case map - 0x01C7: []rune{0x01C9}, // Case map - 0x01C8: []rune{0x01C9}, // Case map - 0x01CA: []rune{0x01CC}, // Case map - 0x01CB: []rune{0x01CC}, // Case map - 0x01CD: []rune{0x01CE}, // Case map - 0x01CF: []rune{0x01D0}, // Case map - 0x01D1: []rune{0x01D2}, // Case map - 0x01D3: []rune{0x01D4}, // Case map - 0x01D5: []rune{0x01D6}, // Case map - 0x01D7: []rune{0x01D8}, // Case map - 0x01D9: []rune{0x01DA}, // Case map - 0x01DB: []rune{0x01DC}, // Case map - 0x01DE: []rune{0x01DF}, // Case map - 0x01E0: []rune{0x01E1}, // Case map - 0x01E2: []rune{0x01E3}, // Case map - 0x01E4: []rune{0x01E5}, // Case map - 0x01E6: []rune{0x01E7}, // Case map - 0x01E8: []rune{0x01E9}, // Case map - 0x01EA: []rune{0x01EB}, // Case map - 0x01EC: []rune{0x01ED}, // Case map - 0x01EE: []rune{0x01EF}, // Case map - 0x01F0: []rune{0x006A, 0x030C}, // Case map - 0x01F1: []rune{0x01F3}, // Case map - 0x01F2: []rune{0x01F3}, // Case map - 0x01F4: []rune{0x01F5}, // Case map - 0x01F6: []rune{0x0195}, // Case map - 0x01F7: []rune{0x01BF}, // Case map - 0x01F8: []rune{0x01F9}, // Case map - 0x01FA: []rune{0x01FB}, // Case map - 0x01FC: []rune{0x01FD}, // Case map - 0x01FE: []rune{0x01FF}, // Case map - 0x0200: []rune{0x0201}, // Case map - 0x0202: []rune{0x0203}, // Case map - 0x0204: []rune{0x0205}, // Case map - 0x0206: []rune{0x0207}, // Case map - 0x0208: []rune{0x0209}, // Case map - 0x020A: []rune{0x020B}, // Case map - 0x020C: []rune{0x020D}, // Case map - 0x020E: []rune{0x020F}, // Case map - 0x0210: []rune{0x0211}, // Case map - 0x0212: []rune{0x0213}, // Case map - 0x0214: []rune{0x0215}, // Case map - 0x0216: []rune{0x0217}, // Case map - 0x0218: []rune{0x0219}, // Case map - 0x021A: []rune{0x021B}, // Case map - 0x021C: []rune{0x021D}, // Case map - 0x021E: []rune{0x021F}, // Case map - 
0x0220: []rune{0x019E}, // Case map - 0x0222: []rune{0x0223}, // Case map - 0x0224: []rune{0x0225}, // Case map - 0x0226: []rune{0x0227}, // Case map - 0x0228: []rune{0x0229}, // Case map - 0x022A: []rune{0x022B}, // Case map - 0x022C: []rune{0x022D}, // Case map - 0x022E: []rune{0x022F}, // Case map - 0x0230: []rune{0x0231}, // Case map - 0x0232: []rune{0x0233}, // Case map - 0x0345: []rune{0x03B9}, // Case map - 0x0386: []rune{0x03AC}, // Case map - 0x0388: []rune{0x03AD}, // Case map - 0x0389: []rune{0x03AE}, // Case map - 0x038A: []rune{0x03AF}, // Case map - 0x038C: []rune{0x03CC}, // Case map - 0x038E: []rune{0x03CD}, // Case map - 0x038F: []rune{0x03CE}, // Case map - 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map - 0x0391: []rune{0x03B1}, // Case map - 0x0392: []rune{0x03B2}, // Case map - 0x0393: []rune{0x03B3}, // Case map - 0x0394: []rune{0x03B4}, // Case map - 0x0395: []rune{0x03B5}, // Case map - 0x0396: []rune{0x03B6}, // Case map - 0x0397: []rune{0x03B7}, // Case map - 0x0398: []rune{0x03B8}, // Case map - 0x0399: []rune{0x03B9}, // Case map - 0x039A: []rune{0x03BA}, // Case map - 0x039B: []rune{0x03BB}, // Case map - 0x039C: []rune{0x03BC}, // Case map - 0x039D: []rune{0x03BD}, // Case map - 0x039E: []rune{0x03BE}, // Case map - 0x039F: []rune{0x03BF}, // Case map - 0x03A0: []rune{0x03C0}, // Case map - 0x03A1: []rune{0x03C1}, // Case map - 0x03A3: []rune{0x03C3}, // Case map - 0x03A4: []rune{0x03C4}, // Case map - 0x03A5: []rune{0x03C5}, // Case map - 0x03A6: []rune{0x03C6}, // Case map - 0x03A7: []rune{0x03C7}, // Case map - 0x03A8: []rune{0x03C8}, // Case map - 0x03A9: []rune{0x03C9}, // Case map - 0x03AA: []rune{0x03CA}, // Case map - 0x03AB: []rune{0x03CB}, // Case map - 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map - 0x03C2: []rune{0x03C3}, // Case map - 0x03D0: []rune{0x03B2}, // Case map - 0x03D1: []rune{0x03B8}, // Case map - 0x03D5: []rune{0x03C6}, // Case map - 0x03D6: []rune{0x03C0}, // Case map - 0x03D8: []rune{0x03D9}, // 
Case map - 0x03DA: []rune{0x03DB}, // Case map - 0x03DC: []rune{0x03DD}, // Case map - 0x03DE: []rune{0x03DF}, // Case map - 0x03E0: []rune{0x03E1}, // Case map - 0x03E2: []rune{0x03E3}, // Case map - 0x03E4: []rune{0x03E5}, // Case map - 0x03E6: []rune{0x03E7}, // Case map - 0x03E8: []rune{0x03E9}, // Case map - 0x03EA: []rune{0x03EB}, // Case map - 0x03EC: []rune{0x03ED}, // Case map - 0x03EE: []rune{0x03EF}, // Case map - 0x03F0: []rune{0x03BA}, // Case map - 0x03F1: []rune{0x03C1}, // Case map - 0x03F2: []rune{0x03C3}, // Case map - 0x03F4: []rune{0x03B8}, // Case map - 0x03F5: []rune{0x03B5}, // Case map - 0x0400: []rune{0x0450}, // Case map - 0x0401: []rune{0x0451}, // Case map - 0x0402: []rune{0x0452}, // Case map - 0x0403: []rune{0x0453}, // Case map - 0x0404: []rune{0x0454}, // Case map - 0x0405: []rune{0x0455}, // Case map - 0x0406: []rune{0x0456}, // Case map - 0x0407: []rune{0x0457}, // Case map - 0x0408: []rune{0x0458}, // Case map - 0x0409: []rune{0x0459}, // Case map - 0x040A: []rune{0x045A}, // Case map - 0x040B: []rune{0x045B}, // Case map - 0x040C: []rune{0x045C}, // Case map - 0x040D: []rune{0x045D}, // Case map - 0x040E: []rune{0x045E}, // Case map - 0x040F: []rune{0x045F}, // Case map - 0x0410: []rune{0x0430}, // Case map - 0x0411: []rune{0x0431}, // Case map - 0x0412: []rune{0x0432}, // Case map - 0x0413: []rune{0x0433}, // Case map - 0x0414: []rune{0x0434}, // Case map - 0x0415: []rune{0x0435}, // Case map - 0x0416: []rune{0x0436}, // Case map - 0x0417: []rune{0x0437}, // Case map - 0x0418: []rune{0x0438}, // Case map - 0x0419: []rune{0x0439}, // Case map - 0x041A: []rune{0x043A}, // Case map - 0x041B: []rune{0x043B}, // Case map - 0x041C: []rune{0x043C}, // Case map - 0x041D: []rune{0x043D}, // Case map - 0x041E: []rune{0x043E}, // Case map - 0x041F: []rune{0x043F}, // Case map - 0x0420: []rune{0x0440}, // Case map - 0x0421: []rune{0x0441}, // Case map - 0x0422: []rune{0x0442}, // Case map - 0x0423: []rune{0x0443}, // Case map - 0x0424: 
[]rune{0x0444}, // Case map - 0x0425: []rune{0x0445}, // Case map - 0x0426: []rune{0x0446}, // Case map - 0x0427: []rune{0x0447}, // Case map - 0x0428: []rune{0x0448}, // Case map - 0x0429: []rune{0x0449}, // Case map - 0x042A: []rune{0x044A}, // Case map - 0x042B: []rune{0x044B}, // Case map - 0x042C: []rune{0x044C}, // Case map - 0x042D: []rune{0x044D}, // Case map - 0x042E: []rune{0x044E}, // Case map - 0x042F: []rune{0x044F}, // Case map - 0x0460: []rune{0x0461}, // Case map - 0x0462: []rune{0x0463}, // Case map - 0x0464: []rune{0x0465}, // Case map - 0x0466: []rune{0x0467}, // Case map - 0x0468: []rune{0x0469}, // Case map - 0x046A: []rune{0x046B}, // Case map - 0x046C: []rune{0x046D}, // Case map - 0x046E: []rune{0x046F}, // Case map - 0x0470: []rune{0x0471}, // Case map - 0x0472: []rune{0x0473}, // Case map - 0x0474: []rune{0x0475}, // Case map - 0x0476: []rune{0x0477}, // Case map - 0x0478: []rune{0x0479}, // Case map - 0x047A: []rune{0x047B}, // Case map - 0x047C: []rune{0x047D}, // Case map - 0x047E: []rune{0x047F}, // Case map - 0x0480: []rune{0x0481}, // Case map - 0x048A: []rune{0x048B}, // Case map - 0x048C: []rune{0x048D}, // Case map - 0x048E: []rune{0x048F}, // Case map - 0x0490: []rune{0x0491}, // Case map - 0x0492: []rune{0x0493}, // Case map - 0x0494: []rune{0x0495}, // Case map - 0x0496: []rune{0x0497}, // Case map - 0x0498: []rune{0x0499}, // Case map - 0x049A: []rune{0x049B}, // Case map - 0x049C: []rune{0x049D}, // Case map - 0x049E: []rune{0x049F}, // Case map - 0x04A0: []rune{0x04A1}, // Case map - 0x04A2: []rune{0x04A3}, // Case map - 0x04A4: []rune{0x04A5}, // Case map - 0x04A6: []rune{0x04A7}, // Case map - 0x04A8: []rune{0x04A9}, // Case map - 0x04AA: []rune{0x04AB}, // Case map - 0x04AC: []rune{0x04AD}, // Case map - 0x04AE: []rune{0x04AF}, // Case map - 0x04B0: []rune{0x04B1}, // Case map - 0x04B2: []rune{0x04B3}, // Case map - 0x04B4: []rune{0x04B5}, // Case map - 0x04B6: []rune{0x04B7}, // Case map - 0x04B8: []rune{0x04B9}, // Case 
map - 0x04BA: []rune{0x04BB}, // Case map - 0x04BC: []rune{0x04BD}, // Case map - 0x04BE: []rune{0x04BF}, // Case map - 0x04C1: []rune{0x04C2}, // Case map - 0x04C3: []rune{0x04C4}, // Case map - 0x04C5: []rune{0x04C6}, // Case map - 0x04C7: []rune{0x04C8}, // Case map - 0x04C9: []rune{0x04CA}, // Case map - 0x04CB: []rune{0x04CC}, // Case map - 0x04CD: []rune{0x04CE}, // Case map - 0x04D0: []rune{0x04D1}, // Case map - 0x04D2: []rune{0x04D3}, // Case map - 0x04D4: []rune{0x04D5}, // Case map - 0x04D6: []rune{0x04D7}, // Case map - 0x04D8: []rune{0x04D9}, // Case map - 0x04DA: []rune{0x04DB}, // Case map - 0x04DC: []rune{0x04DD}, // Case map - 0x04DE: []rune{0x04DF}, // Case map - 0x04E0: []rune{0x04E1}, // Case map - 0x04E2: []rune{0x04E3}, // Case map - 0x04E4: []rune{0x04E5}, // Case map - 0x04E6: []rune{0x04E7}, // Case map - 0x04E8: []rune{0x04E9}, // Case map - 0x04EA: []rune{0x04EB}, // Case map - 0x04EC: []rune{0x04ED}, // Case map - 0x04EE: []rune{0x04EF}, // Case map - 0x04F0: []rune{0x04F1}, // Case map - 0x04F2: []rune{0x04F3}, // Case map - 0x04F4: []rune{0x04F5}, // Case map - 0x04F8: []rune{0x04F9}, // Case map - 0x0500: []rune{0x0501}, // Case map - 0x0502: []rune{0x0503}, // Case map - 0x0504: []rune{0x0505}, // Case map - 0x0506: []rune{0x0507}, // Case map - 0x0508: []rune{0x0509}, // Case map - 0x050A: []rune{0x050B}, // Case map - 0x050C: []rune{0x050D}, // Case map - 0x050E: []rune{0x050F}, // Case map - 0x0531: []rune{0x0561}, // Case map - 0x0532: []rune{0x0562}, // Case map - 0x0533: []rune{0x0563}, // Case map - 0x0534: []rune{0x0564}, // Case map - 0x0535: []rune{0x0565}, // Case map - 0x0536: []rune{0x0566}, // Case map - 0x0537: []rune{0x0567}, // Case map - 0x0538: []rune{0x0568}, // Case map - 0x0539: []rune{0x0569}, // Case map - 0x053A: []rune{0x056A}, // Case map - 0x053B: []rune{0x056B}, // Case map - 0x053C: []rune{0x056C}, // Case map - 0x053D: []rune{0x056D}, // Case map - 0x053E: []rune{0x056E}, // Case map - 0x053F: 
[]rune{0x056F}, // Case map - 0x0540: []rune{0x0570}, // Case map - 0x0541: []rune{0x0571}, // Case map - 0x0542: []rune{0x0572}, // Case map - 0x0543: []rune{0x0573}, // Case map - 0x0544: []rune{0x0574}, // Case map - 0x0545: []rune{0x0575}, // Case map - 0x0546: []rune{0x0576}, // Case map - 0x0547: []rune{0x0577}, // Case map - 0x0548: []rune{0x0578}, // Case map - 0x0549: []rune{0x0579}, // Case map - 0x054A: []rune{0x057A}, // Case map - 0x054B: []rune{0x057B}, // Case map - 0x054C: []rune{0x057C}, // Case map - 0x054D: []rune{0x057D}, // Case map - 0x054E: []rune{0x057E}, // Case map - 0x054F: []rune{0x057F}, // Case map - 0x0550: []rune{0x0580}, // Case map - 0x0551: []rune{0x0581}, // Case map - 0x0552: []rune{0x0582}, // Case map - 0x0553: []rune{0x0583}, // Case map - 0x0554: []rune{0x0584}, // Case map - 0x0555: []rune{0x0585}, // Case map - 0x0556: []rune{0x0586}, // Case map - 0x0587: []rune{0x0565, 0x0582}, // Case map - 0x1E00: []rune{0x1E01}, // Case map - 0x1E02: []rune{0x1E03}, // Case map - 0x1E04: []rune{0x1E05}, // Case map - 0x1E06: []rune{0x1E07}, // Case map - 0x1E08: []rune{0x1E09}, // Case map - 0x1E0A: []rune{0x1E0B}, // Case map - 0x1E0C: []rune{0x1E0D}, // Case map - 0x1E0E: []rune{0x1E0F}, // Case map - 0x1E10: []rune{0x1E11}, // Case map - 0x1E12: []rune{0x1E13}, // Case map - 0x1E14: []rune{0x1E15}, // Case map - 0x1E16: []rune{0x1E17}, // Case map - 0x1E18: []rune{0x1E19}, // Case map - 0x1E1A: []rune{0x1E1B}, // Case map - 0x1E1C: []rune{0x1E1D}, // Case map - 0x1E1E: []rune{0x1E1F}, // Case map - 0x1E20: []rune{0x1E21}, // Case map - 0x1E22: []rune{0x1E23}, // Case map - 0x1E24: []rune{0x1E25}, // Case map - 0x1E26: []rune{0x1E27}, // Case map - 0x1E28: []rune{0x1E29}, // Case map - 0x1E2A: []rune{0x1E2B}, // Case map - 0x1E2C: []rune{0x1E2D}, // Case map - 0x1E2E: []rune{0x1E2F}, // Case map - 0x1E30: []rune{0x1E31}, // Case map - 0x1E32: []rune{0x1E33}, // Case map - 0x1E34: []rune{0x1E35}, // Case map - 0x1E36: []rune{0x1E37}, 
// Case map - 0x1E38: []rune{0x1E39}, // Case map - 0x1E3A: []rune{0x1E3B}, // Case map - 0x1E3C: []rune{0x1E3D}, // Case map - 0x1E3E: []rune{0x1E3F}, // Case map - 0x1E40: []rune{0x1E41}, // Case map - 0x1E42: []rune{0x1E43}, // Case map - 0x1E44: []rune{0x1E45}, // Case map - 0x1E46: []rune{0x1E47}, // Case map - 0x1E48: []rune{0x1E49}, // Case map - 0x1E4A: []rune{0x1E4B}, // Case map - 0x1E4C: []rune{0x1E4D}, // Case map - 0x1E4E: []rune{0x1E4F}, // Case map - 0x1E50: []rune{0x1E51}, // Case map - 0x1E52: []rune{0x1E53}, // Case map - 0x1E54: []rune{0x1E55}, // Case map - 0x1E56: []rune{0x1E57}, // Case map - 0x1E58: []rune{0x1E59}, // Case map - 0x1E5A: []rune{0x1E5B}, // Case map - 0x1E5C: []rune{0x1E5D}, // Case map - 0x1E5E: []rune{0x1E5F}, // Case map - 0x1E60: []rune{0x1E61}, // Case map - 0x1E62: []rune{0x1E63}, // Case map - 0x1E64: []rune{0x1E65}, // Case map - 0x1E66: []rune{0x1E67}, // Case map - 0x1E68: []rune{0x1E69}, // Case map - 0x1E6A: []rune{0x1E6B}, // Case map - 0x1E6C: []rune{0x1E6D}, // Case map - 0x1E6E: []rune{0x1E6F}, // Case map - 0x1E70: []rune{0x1E71}, // Case map - 0x1E72: []rune{0x1E73}, // Case map - 0x1E74: []rune{0x1E75}, // Case map - 0x1E76: []rune{0x1E77}, // Case map - 0x1E78: []rune{0x1E79}, // Case map - 0x1E7A: []rune{0x1E7B}, // Case map - 0x1E7C: []rune{0x1E7D}, // Case map - 0x1E7E: []rune{0x1E7F}, // Case map - 0x1E80: []rune{0x1E81}, // Case map - 0x1E82: []rune{0x1E83}, // Case map - 0x1E84: []rune{0x1E85}, // Case map - 0x1E86: []rune{0x1E87}, // Case map - 0x1E88: []rune{0x1E89}, // Case map - 0x1E8A: []rune{0x1E8B}, // Case map - 0x1E8C: []rune{0x1E8D}, // Case map - 0x1E8E: []rune{0x1E8F}, // Case map - 0x1E90: []rune{0x1E91}, // Case map - 0x1E92: []rune{0x1E93}, // Case map - 0x1E94: []rune{0x1E95}, // Case map - 0x1E96: []rune{0x0068, 0x0331}, // Case map - 0x1E97: []rune{0x0074, 0x0308}, // Case map - 0x1E98: []rune{0x0077, 0x030A}, // Case map - 0x1E99: []rune{0x0079, 0x030A}, // Case map - 0x1E9A: 
[]rune{0x0061, 0x02BE}, // Case map - 0x1E9B: []rune{0x1E61}, // Case map - 0x1EA0: []rune{0x1EA1}, // Case map - 0x1EA2: []rune{0x1EA3}, // Case map - 0x1EA4: []rune{0x1EA5}, // Case map - 0x1EA6: []rune{0x1EA7}, // Case map - 0x1EA8: []rune{0x1EA9}, // Case map - 0x1EAA: []rune{0x1EAB}, // Case map - 0x1EAC: []rune{0x1EAD}, // Case map - 0x1EAE: []rune{0x1EAF}, // Case map - 0x1EB0: []rune{0x1EB1}, // Case map - 0x1EB2: []rune{0x1EB3}, // Case map - 0x1EB4: []rune{0x1EB5}, // Case map - 0x1EB6: []rune{0x1EB7}, // Case map - 0x1EB8: []rune{0x1EB9}, // Case map - 0x1EBA: []rune{0x1EBB}, // Case map - 0x1EBC: []rune{0x1EBD}, // Case map - 0x1EBE: []rune{0x1EBF}, // Case map - 0x1EC0: []rune{0x1EC1}, // Case map - 0x1EC2: []rune{0x1EC3}, // Case map - 0x1EC4: []rune{0x1EC5}, // Case map - 0x1EC6: []rune{0x1EC7}, // Case map - 0x1EC8: []rune{0x1EC9}, // Case map - 0x1ECA: []rune{0x1ECB}, // Case map - 0x1ECC: []rune{0x1ECD}, // Case map - 0x1ECE: []rune{0x1ECF}, // Case map - 0x1ED0: []rune{0x1ED1}, // Case map - 0x1ED2: []rune{0x1ED3}, // Case map - 0x1ED4: []rune{0x1ED5}, // Case map - 0x1ED6: []rune{0x1ED7}, // Case map - 0x1ED8: []rune{0x1ED9}, // Case map - 0x1EDA: []rune{0x1EDB}, // Case map - 0x1EDC: []rune{0x1EDD}, // Case map - 0x1EDE: []rune{0x1EDF}, // Case map - 0x1EE0: []rune{0x1EE1}, // Case map - 0x1EE2: []rune{0x1EE3}, // Case map - 0x1EE4: []rune{0x1EE5}, // Case map - 0x1EE6: []rune{0x1EE7}, // Case map - 0x1EE8: []rune{0x1EE9}, // Case map - 0x1EEA: []rune{0x1EEB}, // Case map - 0x1EEC: []rune{0x1EED}, // Case map - 0x1EEE: []rune{0x1EEF}, // Case map - 0x1EF0: []rune{0x1EF1}, // Case map - 0x1EF2: []rune{0x1EF3}, // Case map - 0x1EF4: []rune{0x1EF5}, // Case map - 0x1EF6: []rune{0x1EF7}, // Case map - 0x1EF8: []rune{0x1EF9}, // Case map - 0x1F08: []rune{0x1F00}, // Case map - 0x1F09: []rune{0x1F01}, // Case map - 0x1F0A: []rune{0x1F02}, // Case map - 0x1F0B: []rune{0x1F03}, // Case map - 0x1F0C: []rune{0x1F04}, // Case map - 0x1F0D: []rune{0x1F05}, 
// Case map - 0x1F0E: []rune{0x1F06}, // Case map - 0x1F0F: []rune{0x1F07}, // Case map - 0x1F18: []rune{0x1F10}, // Case map - 0x1F19: []rune{0x1F11}, // Case map - 0x1F1A: []rune{0x1F12}, // Case map - 0x1F1B: []rune{0x1F13}, // Case map - 0x1F1C: []rune{0x1F14}, // Case map - 0x1F1D: []rune{0x1F15}, // Case map - 0x1F28: []rune{0x1F20}, // Case map - 0x1F29: []rune{0x1F21}, // Case map - 0x1F2A: []rune{0x1F22}, // Case map - 0x1F2B: []rune{0x1F23}, // Case map - 0x1F2C: []rune{0x1F24}, // Case map - 0x1F2D: []rune{0x1F25}, // Case map - 0x1F2E: []rune{0x1F26}, // Case map - 0x1F2F: []rune{0x1F27}, // Case map - 0x1F38: []rune{0x1F30}, // Case map - 0x1F39: []rune{0x1F31}, // Case map - 0x1F3A: []rune{0x1F32}, // Case map - 0x1F3B: []rune{0x1F33}, // Case map - 0x1F3C: []rune{0x1F34}, // Case map - 0x1F3D: []rune{0x1F35}, // Case map - 0x1F3E: []rune{0x1F36}, // Case map - 0x1F3F: []rune{0x1F37}, // Case map - 0x1F48: []rune{0x1F40}, // Case map - 0x1F49: []rune{0x1F41}, // Case map - 0x1F4A: []rune{0x1F42}, // Case map - 0x1F4B: []rune{0x1F43}, // Case map - 0x1F4C: []rune{0x1F44}, // Case map - 0x1F4D: []rune{0x1F45}, // Case map - 0x1F50: []rune{0x03C5, 0x0313}, // Case map - 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map - 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map - 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map - 0x1F59: []rune{0x1F51}, // Case map - 0x1F5B: []rune{0x1F53}, // Case map - 0x1F5D: []rune{0x1F55}, // Case map - 0x1F5F: []rune{0x1F57}, // Case map - 0x1F68: []rune{0x1F60}, // Case map - 0x1F69: []rune{0x1F61}, // Case map - 0x1F6A: []rune{0x1F62}, // Case map - 0x1F6B: []rune{0x1F63}, // Case map - 0x1F6C: []rune{0x1F64}, // Case map - 0x1F6D: []rune{0x1F65}, // Case map - 0x1F6E: []rune{0x1F66}, // Case map - 0x1F6F: []rune{0x1F67}, // Case map - 0x1F80: []rune{0x1F00, 0x03B9}, // Case map - 0x1F81: []rune{0x1F01, 0x03B9}, // Case map - 0x1F82: []rune{0x1F02, 0x03B9}, // Case map - 0x1F83: []rune{0x1F03, 0x03B9}, // Case map 
- 0x1F84: []rune{0x1F04, 0x03B9}, // Case map - 0x1F85: []rune{0x1F05, 0x03B9}, // Case map - 0x1F86: []rune{0x1F06, 0x03B9}, // Case map - 0x1F87: []rune{0x1F07, 0x03B9}, // Case map - 0x1F88: []rune{0x1F00, 0x03B9}, // Case map - 0x1F89: []rune{0x1F01, 0x03B9}, // Case map - 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map - 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map - 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map - 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map - 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map - 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map - 0x1F90: []rune{0x1F20, 0x03B9}, // Case map - 0x1F91: []rune{0x1F21, 0x03B9}, // Case map - 0x1F92: []rune{0x1F22, 0x03B9}, // Case map - 0x1F93: []rune{0x1F23, 0x03B9}, // Case map - 0x1F94: []rune{0x1F24, 0x03B9}, // Case map - 0x1F95: []rune{0x1F25, 0x03B9}, // Case map - 0x1F96: []rune{0x1F26, 0x03B9}, // Case map - 0x1F97: []rune{0x1F27, 0x03B9}, // Case map - 0x1F98: []rune{0x1F20, 0x03B9}, // Case map - 0x1F99: []rune{0x1F21, 0x03B9}, // Case map - 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map - 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map - 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map - 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map - 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map - 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map - 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map - 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map - 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map - 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map - 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map - 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map - 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map - 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map - 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map - 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map - 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map - 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map - 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map - 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map - 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map - 0x1FAF: 
[]rune{0x1F67, 0x03B9}, // Case map - 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map - 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map - 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map - 0x1FB6: []rune{0x03B1, 0x0342}, // Case map - 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map - 0x1FB8: []rune{0x1FB0}, // Case map - 0x1FB9: []rune{0x1FB1}, // Case map - 0x1FBA: []rune{0x1F70}, // Case map - 0x1FBB: []rune{0x1F71}, // Case map - 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map - 0x1FBE: []rune{0x03B9}, // Case map - 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map - 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map - 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map - 0x1FC6: []rune{0x03B7, 0x0342}, // Case map - 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map - 0x1FC8: []rune{0x1F72}, // Case map - 0x1FC9: []rune{0x1F73}, // Case map - 0x1FCA: []rune{0x1F74}, // Case map - 0x1FCB: []rune{0x1F75}, // Case map - 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map - 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map - 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map - 0x1FD6: []rune{0x03B9, 0x0342}, // Case map - 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map - 0x1FD8: []rune{0x1FD0}, // Case map - 0x1FD9: []rune{0x1FD1}, // Case map - 0x1FDA: []rune{0x1F76}, // Case map - 0x1FDB: []rune{0x1F77}, // Case map - 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map - 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map - 0x1FE4: []rune{0x03C1, 0x0313}, // Case map - 0x1FE6: []rune{0x03C5, 0x0342}, // Case map - 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map - 0x1FE8: []rune{0x1FE0}, // Case map - 0x1FE9: []rune{0x1FE1}, // Case map - 0x1FEA: []rune{0x1F7A}, // Case map - 0x1FEB: []rune{0x1F7B}, // Case map - 0x1FEC: []rune{0x1FE5}, // Case map - 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map - 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map - 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map - 0x1FF6: []rune{0x03C9, 0x0342}, // Case map - 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map - 0x1FF8: 
[]rune{0x1F78}, // Case map - 0x1FF9: []rune{0x1F79}, // Case map - 0x1FFA: []rune{0x1F7C}, // Case map - 0x1FFB: []rune{0x1F7D}, // Case map - 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map - 0x2126: []rune{0x03C9}, // Case map - 0x212A: []rune{0x006B}, // Case map - 0x212B: []rune{0x00E5}, // Case map - 0x2160: []rune{0x2170}, // Case map - 0x2161: []rune{0x2171}, // Case map - 0x2162: []rune{0x2172}, // Case map - 0x2163: []rune{0x2173}, // Case map - 0x2164: []rune{0x2174}, // Case map - 0x2165: []rune{0x2175}, // Case map - 0x2166: []rune{0x2176}, // Case map - 0x2167: []rune{0x2177}, // Case map - 0x2168: []rune{0x2178}, // Case map - 0x2169: []rune{0x2179}, // Case map - 0x216A: []rune{0x217A}, // Case map - 0x216B: []rune{0x217B}, // Case map - 0x216C: []rune{0x217C}, // Case map - 0x216D: []rune{0x217D}, // Case map - 0x216E: []rune{0x217E}, // Case map - 0x216F: []rune{0x217F}, // Case map - 0x24B6: []rune{0x24D0}, // Case map - 0x24B7: []rune{0x24D1}, // Case map - 0x24B8: []rune{0x24D2}, // Case map - 0x24B9: []rune{0x24D3}, // Case map - 0x24BA: []rune{0x24D4}, // Case map - 0x24BB: []rune{0x24D5}, // Case map - 0x24BC: []rune{0x24D6}, // Case map - 0x24BD: []rune{0x24D7}, // Case map - 0x24BE: []rune{0x24D8}, // Case map - 0x24BF: []rune{0x24D9}, // Case map - 0x24C0: []rune{0x24DA}, // Case map - 0x24C1: []rune{0x24DB}, // Case map - 0x24C2: []rune{0x24DC}, // Case map - 0x24C3: []rune{0x24DD}, // Case map - 0x24C4: []rune{0x24DE}, // Case map - 0x24C5: []rune{0x24DF}, // Case map - 0x24C6: []rune{0x24E0}, // Case map - 0x24C7: []rune{0x24E1}, // Case map - 0x24C8: []rune{0x24E2}, // Case map - 0x24C9: []rune{0x24E3}, // Case map - 0x24CA: []rune{0x24E4}, // Case map - 0x24CB: []rune{0x24E5}, // Case map - 0x24CC: []rune{0x24E6}, // Case map - 0x24CD: []rune{0x24E7}, // Case map - 0x24CE: []rune{0x24E8}, // Case map - 0x24CF: []rune{0x24E9}, // Case map - 0xFB00: []rune{0x0066, 0x0066}, // Case map - 0xFB01: []rune{0x0066, 0x0069}, // Case map - 0xFB02: 
[]rune{0x0066, 0x006C}, // Case map - 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map - 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map - 0xFB05: []rune{0x0073, 0x0074}, // Case map - 0xFB06: []rune{0x0073, 0x0074}, // Case map - 0xFB13: []rune{0x0574, 0x0576}, // Case map - 0xFB14: []rune{0x0574, 0x0565}, // Case map - 0xFB15: []rune{0x0574, 0x056B}, // Case map - 0xFB16: []rune{0x057E, 0x0576}, // Case map - 0xFB17: []rune{0x0574, 0x056D}, // Case map - 0xFF21: []rune{0xFF41}, // Case map - 0xFF22: []rune{0xFF42}, // Case map - 0xFF23: []rune{0xFF43}, // Case map - 0xFF24: []rune{0xFF44}, // Case map - 0xFF25: []rune{0xFF45}, // Case map - 0xFF26: []rune{0xFF46}, // Case map - 0xFF27: []rune{0xFF47}, // Case map - 0xFF28: []rune{0xFF48}, // Case map - 0xFF29: []rune{0xFF49}, // Case map - 0xFF2A: []rune{0xFF4A}, // Case map - 0xFF2B: []rune{0xFF4B}, // Case map - 0xFF2C: []rune{0xFF4C}, // Case map - 0xFF2D: []rune{0xFF4D}, // Case map - 0xFF2E: []rune{0xFF4E}, // Case map - 0xFF2F: []rune{0xFF4F}, // Case map - 0xFF30: []rune{0xFF50}, // Case map - 0xFF31: []rune{0xFF51}, // Case map - 0xFF32: []rune{0xFF52}, // Case map - 0xFF33: []rune{0xFF53}, // Case map - 0xFF34: []rune{0xFF54}, // Case map - 0xFF35: []rune{0xFF55}, // Case map - 0xFF36: []rune{0xFF56}, // Case map - 0xFF37: []rune{0xFF57}, // Case map - 0xFF38: []rune{0xFF58}, // Case map - 0xFF39: []rune{0xFF59}, // Case map - 0xFF3A: []rune{0xFF5A}, // Case map - 0x10400: []rune{0x10428}, // Case map - 0x10401: []rune{0x10429}, // Case map - 0x10402: []rune{0x1042A}, // Case map - 0x10403: []rune{0x1042B}, // Case map - 0x10404: []rune{0x1042C}, // Case map - 0x10405: []rune{0x1042D}, // Case map - 0x10406: []rune{0x1042E}, // Case map - 0x10407: []rune{0x1042F}, // Case map - 0x10408: []rune{0x10430}, // Case map - 0x10409: []rune{0x10431}, // Case map - 0x1040A: []rune{0x10432}, // Case map - 0x1040B: []rune{0x10433}, // Case map - 0x1040C: []rune{0x10434}, // Case map - 0x1040D: 
[]rune{0x10435}, // Case map - 0x1040E: []rune{0x10436}, // Case map - 0x1040F: []rune{0x10437}, // Case map - 0x10410: []rune{0x10438}, // Case map - 0x10411: []rune{0x10439}, // Case map - 0x10412: []rune{0x1043A}, // Case map - 0x10413: []rune{0x1043B}, // Case map - 0x10414: []rune{0x1043C}, // Case map - 0x10415: []rune{0x1043D}, // Case map - 0x10416: []rune{0x1043E}, // Case map - 0x10417: []rune{0x1043F}, // Case map - 0x10418: []rune{0x10440}, // Case map - 0x10419: []rune{0x10441}, // Case map - 0x1041A: []rune{0x10442}, // Case map - 0x1041B: []rune{0x10443}, // Case map - 0x1041C: []rune{0x10444}, // Case map - 0x1041D: []rune{0x10445}, // Case map - 0x1041E: []rune{0x10446}, // Case map - 0x1041F: []rune{0x10447}, // Case map - 0x10420: []rune{0x10448}, // Case map - 0x10421: []rune{0x10449}, // Case map - 0x10422: []rune{0x1044A}, // Case map - 0x10423: []rune{0x1044B}, // Case map - 0x10424: []rune{0x1044C}, // Case map - 0x10425: []rune{0x1044D}, // Case map -} - -// TableB3 represents RFC-3454 Table B.3. -var TableB3 Mapping = tableB3 - -var tableC1_1 = Set{ - RuneRange{0x0020, 0x0020}, // SPACE -} - -// TableC1_1 represents RFC-3454 Table C.1.1. 
-var TableC1_1 Set = tableC1_1 - -var tableC1_2 = Set{ - RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE - RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK - RuneRange{0x2000, 0x2000}, // EN QUAD - RuneRange{0x2001, 0x2001}, // EM QUAD - RuneRange{0x2002, 0x2002}, // EN SPACE - RuneRange{0x2003, 0x2003}, // EM SPACE - RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE - RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE - RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE - RuneRange{0x2007, 0x2007}, // FIGURE SPACE - RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE - RuneRange{0x2009, 0x2009}, // THIN SPACE - RuneRange{0x200A, 0x200A}, // HAIR SPACE - RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE - RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE - RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE - RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE -} - -// TableC1_2 represents RFC-3454 Table C.1.2. -var TableC1_2 Set = tableC1_2 - -var tableC2_1 = Set{ - RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS] - RuneRange{0x007F, 0x007F}, // DELETE -} - -// TableC2_1 represents RFC-3454 Table C.2.1. 
-var TableC2_1 Set = tableC2_1 - -var tableC2_2 = Set{ - RuneRange{0x0080, 0x009F}, // [CONTROL CHARACTERS] - RuneRange{0x06DD, 0x06DD}, // ARABIC END OF AYAH - RuneRange{0x070F, 0x070F}, // SYRIAC ABBREVIATION MARK - RuneRange{0x180E, 0x180E}, // MONGOLIAN VOWEL SEPARATOR - RuneRange{0x200C, 0x200C}, // ZERO WIDTH NON-JOINER - RuneRange{0x200D, 0x200D}, // ZERO WIDTH JOINER - RuneRange{0x2028, 0x2028}, // LINE SEPARATOR - RuneRange{0x2029, 0x2029}, // PARAGRAPH SEPARATOR - RuneRange{0x2060, 0x2060}, // WORD JOINER - RuneRange{0x2061, 0x2061}, // FUNCTION APPLICATION - RuneRange{0x2062, 0x2062}, // INVISIBLE TIMES - RuneRange{0x2063, 0x2063}, // INVISIBLE SEPARATOR - RuneRange{0x206A, 0x206F}, // [CONTROL CHARACTERS] - RuneRange{0xFEFF, 0xFEFF}, // ZERO WIDTH NO-BREAK SPACE - RuneRange{0xFFF9, 0xFFFC}, // [CONTROL CHARACTERS] - RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS] -} - -// TableC2_2 represents RFC-3454 Table C.2.2. -var TableC2_2 Set = tableC2_2 - -var tableC3 = Set{ - RuneRange{0xE000, 0xF8FF}, // [PRIVATE USE, PLANE 0] - RuneRange{0xF0000, 0xFFFFD}, // [PRIVATE USE, PLANE 15] - RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16] -} - -// TableC3 represents RFC-3454 Table C.3. 
-var TableC3 Set = tableC3 - -var tableC4 = Set{ - RuneRange{0xFDD0, 0xFDEF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xFFFE, 0xFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x1FFFE, 0x1FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x2FFFE, 0x2FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x3FFFE, 0x3FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x4FFFE, 0x4FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x5FFFE, 0x5FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x6FFFE, 0x6FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x7FFFE, 0x7FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x8FFFE, 0x8FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x9FFFE, 0x9FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xAFFFE, 0xAFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xBFFFE, 0xBFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xCFFFE, 0xCFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xDFFFE, 0xDFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xEFFFE, 0xEFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xFFFFE, 0xFFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS] -} - -// TableC4 represents RFC-3454 Table C.4. -var TableC4 Set = tableC4 - -var tableC5 = Set{ - RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES] -} - -// TableC5 represents RFC-3454 Table C.5. -var TableC5 Set = tableC5 - -var tableC6 = Set{ - RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR - RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR - RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR - RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER - RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER -} - -// TableC6 represents RFC-3454 Table C.6. -var TableC6 Set = tableC6 - -var tableC7 = Set{ - RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS] -} - -// TableC7 represents RFC-3454 Table C.7. 
-var TableC7 Set = tableC7 - -var tableC8 = Set{ - RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK - RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK - RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK - RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK - RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING - RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING - RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING - RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE - RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE - RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING - RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING - RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING - RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING - RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES - RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES -} - -// TableC8 represents RFC-3454 Table C.8. -var TableC8 Set = tableC8 - -var tableC9 = Set{ - RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG - RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS] -} - -// TableC9 represents RFC-3454 Table C.9. 
-var TableC9 Set = tableC9 - -var tableD1 = Set{ - RuneRange{0x05BE, 0x05BE}, - RuneRange{0x05C0, 0x05C0}, - RuneRange{0x05C3, 0x05C3}, - RuneRange{0x05D0, 0x05EA}, - RuneRange{0x05F0, 0x05F4}, - RuneRange{0x061B, 0x061B}, - RuneRange{0x061F, 0x061F}, - RuneRange{0x0621, 0x063A}, - RuneRange{0x0640, 0x064A}, - RuneRange{0x066D, 0x066F}, - RuneRange{0x0671, 0x06D5}, - RuneRange{0x06DD, 0x06DD}, - RuneRange{0x06E5, 0x06E6}, - RuneRange{0x06FA, 0x06FE}, - RuneRange{0x0700, 0x070D}, - RuneRange{0x0710, 0x0710}, - RuneRange{0x0712, 0x072C}, - RuneRange{0x0780, 0x07A5}, - RuneRange{0x07B1, 0x07B1}, - RuneRange{0x200F, 0x200F}, - RuneRange{0xFB1D, 0xFB1D}, - RuneRange{0xFB1F, 0xFB28}, - RuneRange{0xFB2A, 0xFB36}, - RuneRange{0xFB38, 0xFB3C}, - RuneRange{0xFB3E, 0xFB3E}, - RuneRange{0xFB40, 0xFB41}, - RuneRange{0xFB43, 0xFB44}, - RuneRange{0xFB46, 0xFBB1}, - RuneRange{0xFBD3, 0xFD3D}, - RuneRange{0xFD50, 0xFD8F}, - RuneRange{0xFD92, 0xFDC7}, - RuneRange{0xFDF0, 0xFDFC}, - RuneRange{0xFE70, 0xFE74}, - RuneRange{0xFE76, 0xFEFC}, -} - -// TableD1 represents RFC-3454 Table D.1. 
-var TableD1 Set = tableD1 - -var tableD2 = Set{ - RuneRange{0x0041, 0x005A}, - RuneRange{0x0061, 0x007A}, - RuneRange{0x00AA, 0x00AA}, - RuneRange{0x00B5, 0x00B5}, - RuneRange{0x00BA, 0x00BA}, - RuneRange{0x00C0, 0x00D6}, - RuneRange{0x00D8, 0x00F6}, - RuneRange{0x00F8, 0x0220}, - RuneRange{0x0222, 0x0233}, - RuneRange{0x0250, 0x02AD}, - RuneRange{0x02B0, 0x02B8}, - RuneRange{0x02BB, 0x02C1}, - RuneRange{0x02D0, 0x02D1}, - RuneRange{0x02E0, 0x02E4}, - RuneRange{0x02EE, 0x02EE}, - RuneRange{0x037A, 0x037A}, - RuneRange{0x0386, 0x0386}, - RuneRange{0x0388, 0x038A}, - RuneRange{0x038C, 0x038C}, - RuneRange{0x038E, 0x03A1}, - RuneRange{0x03A3, 0x03CE}, - RuneRange{0x03D0, 0x03F5}, - RuneRange{0x0400, 0x0482}, - RuneRange{0x048A, 0x04CE}, - RuneRange{0x04D0, 0x04F5}, - RuneRange{0x04F8, 0x04F9}, - RuneRange{0x0500, 0x050F}, - RuneRange{0x0531, 0x0556}, - RuneRange{0x0559, 0x055F}, - RuneRange{0x0561, 0x0587}, - RuneRange{0x0589, 0x0589}, - RuneRange{0x0903, 0x0903}, - RuneRange{0x0905, 0x0939}, - RuneRange{0x093D, 0x0940}, - RuneRange{0x0949, 0x094C}, - RuneRange{0x0950, 0x0950}, - RuneRange{0x0958, 0x0961}, - RuneRange{0x0964, 0x0970}, - RuneRange{0x0982, 0x0983}, - RuneRange{0x0985, 0x098C}, - RuneRange{0x098F, 0x0990}, - RuneRange{0x0993, 0x09A8}, - RuneRange{0x09AA, 0x09B0}, - RuneRange{0x09B2, 0x09B2}, - RuneRange{0x09B6, 0x09B9}, - RuneRange{0x09BE, 0x09C0}, - RuneRange{0x09C7, 0x09C8}, - RuneRange{0x09CB, 0x09CC}, - RuneRange{0x09D7, 0x09D7}, - RuneRange{0x09DC, 0x09DD}, - RuneRange{0x09DF, 0x09E1}, - RuneRange{0x09E6, 0x09F1}, - RuneRange{0x09F4, 0x09FA}, - RuneRange{0x0A05, 0x0A0A}, - RuneRange{0x0A0F, 0x0A10}, - RuneRange{0x0A13, 0x0A28}, - RuneRange{0x0A2A, 0x0A30}, - RuneRange{0x0A32, 0x0A33}, - RuneRange{0x0A35, 0x0A36}, - RuneRange{0x0A38, 0x0A39}, - RuneRange{0x0A3E, 0x0A40}, - RuneRange{0x0A59, 0x0A5C}, - RuneRange{0x0A5E, 0x0A5E}, - RuneRange{0x0A66, 0x0A6F}, - RuneRange{0x0A72, 0x0A74}, - RuneRange{0x0A83, 0x0A83}, - RuneRange{0x0A85, 0x0A8B}, - 
RuneRange{0x0A8D, 0x0A8D}, - RuneRange{0x0A8F, 0x0A91}, - RuneRange{0x0A93, 0x0AA8}, - RuneRange{0x0AAA, 0x0AB0}, - RuneRange{0x0AB2, 0x0AB3}, - RuneRange{0x0AB5, 0x0AB9}, - RuneRange{0x0ABD, 0x0AC0}, - RuneRange{0x0AC9, 0x0AC9}, - RuneRange{0x0ACB, 0x0ACC}, - RuneRange{0x0AD0, 0x0AD0}, - RuneRange{0x0AE0, 0x0AE0}, - RuneRange{0x0AE6, 0x0AEF}, - RuneRange{0x0B02, 0x0B03}, - RuneRange{0x0B05, 0x0B0C}, - RuneRange{0x0B0F, 0x0B10}, - RuneRange{0x0B13, 0x0B28}, - RuneRange{0x0B2A, 0x0B30}, - RuneRange{0x0B32, 0x0B33}, - RuneRange{0x0B36, 0x0B39}, - RuneRange{0x0B3D, 0x0B3E}, - RuneRange{0x0B40, 0x0B40}, - RuneRange{0x0B47, 0x0B48}, - RuneRange{0x0B4B, 0x0B4C}, - RuneRange{0x0B57, 0x0B57}, - RuneRange{0x0B5C, 0x0B5D}, - RuneRange{0x0B5F, 0x0B61}, - RuneRange{0x0B66, 0x0B70}, - RuneRange{0x0B83, 0x0B83}, - RuneRange{0x0B85, 0x0B8A}, - RuneRange{0x0B8E, 0x0B90}, - RuneRange{0x0B92, 0x0B95}, - RuneRange{0x0B99, 0x0B9A}, - RuneRange{0x0B9C, 0x0B9C}, - RuneRange{0x0B9E, 0x0B9F}, - RuneRange{0x0BA3, 0x0BA4}, - RuneRange{0x0BA8, 0x0BAA}, - RuneRange{0x0BAE, 0x0BB5}, - RuneRange{0x0BB7, 0x0BB9}, - RuneRange{0x0BBE, 0x0BBF}, - RuneRange{0x0BC1, 0x0BC2}, - RuneRange{0x0BC6, 0x0BC8}, - RuneRange{0x0BCA, 0x0BCC}, - RuneRange{0x0BD7, 0x0BD7}, - RuneRange{0x0BE7, 0x0BF2}, - RuneRange{0x0C01, 0x0C03}, - RuneRange{0x0C05, 0x0C0C}, - RuneRange{0x0C0E, 0x0C10}, - RuneRange{0x0C12, 0x0C28}, - RuneRange{0x0C2A, 0x0C33}, - RuneRange{0x0C35, 0x0C39}, - RuneRange{0x0C41, 0x0C44}, - RuneRange{0x0C60, 0x0C61}, - RuneRange{0x0C66, 0x0C6F}, - RuneRange{0x0C82, 0x0C83}, - RuneRange{0x0C85, 0x0C8C}, - RuneRange{0x0C8E, 0x0C90}, - RuneRange{0x0C92, 0x0CA8}, - RuneRange{0x0CAA, 0x0CB3}, - RuneRange{0x0CB5, 0x0CB9}, - RuneRange{0x0CBE, 0x0CBE}, - RuneRange{0x0CC0, 0x0CC4}, - RuneRange{0x0CC7, 0x0CC8}, - RuneRange{0x0CCA, 0x0CCB}, - RuneRange{0x0CD5, 0x0CD6}, - RuneRange{0x0CDE, 0x0CDE}, - RuneRange{0x0CE0, 0x0CE1}, - RuneRange{0x0CE6, 0x0CEF}, - RuneRange{0x0D02, 0x0D03}, - RuneRange{0x0D05, 0x0D0C}, 
- RuneRange{0x0D0E, 0x0D10}, - RuneRange{0x0D12, 0x0D28}, - RuneRange{0x0D2A, 0x0D39}, - RuneRange{0x0D3E, 0x0D40}, - RuneRange{0x0D46, 0x0D48}, - RuneRange{0x0D4A, 0x0D4C}, - RuneRange{0x0D57, 0x0D57}, - RuneRange{0x0D60, 0x0D61}, - RuneRange{0x0D66, 0x0D6F}, - RuneRange{0x0D82, 0x0D83}, - RuneRange{0x0D85, 0x0D96}, - RuneRange{0x0D9A, 0x0DB1}, - RuneRange{0x0DB3, 0x0DBB}, - RuneRange{0x0DBD, 0x0DBD}, - RuneRange{0x0DC0, 0x0DC6}, - RuneRange{0x0DCF, 0x0DD1}, - RuneRange{0x0DD8, 0x0DDF}, - RuneRange{0x0DF2, 0x0DF4}, - RuneRange{0x0E01, 0x0E30}, - RuneRange{0x0E32, 0x0E33}, - RuneRange{0x0E40, 0x0E46}, - RuneRange{0x0E4F, 0x0E5B}, - RuneRange{0x0E81, 0x0E82}, - RuneRange{0x0E84, 0x0E84}, - RuneRange{0x0E87, 0x0E88}, - RuneRange{0x0E8A, 0x0E8A}, - RuneRange{0x0E8D, 0x0E8D}, - RuneRange{0x0E94, 0x0E97}, - RuneRange{0x0E99, 0x0E9F}, - RuneRange{0x0EA1, 0x0EA3}, - RuneRange{0x0EA5, 0x0EA5}, - RuneRange{0x0EA7, 0x0EA7}, - RuneRange{0x0EAA, 0x0EAB}, - RuneRange{0x0EAD, 0x0EB0}, - RuneRange{0x0EB2, 0x0EB3}, - RuneRange{0x0EBD, 0x0EBD}, - RuneRange{0x0EC0, 0x0EC4}, - RuneRange{0x0EC6, 0x0EC6}, - RuneRange{0x0ED0, 0x0ED9}, - RuneRange{0x0EDC, 0x0EDD}, - RuneRange{0x0F00, 0x0F17}, - RuneRange{0x0F1A, 0x0F34}, - RuneRange{0x0F36, 0x0F36}, - RuneRange{0x0F38, 0x0F38}, - RuneRange{0x0F3E, 0x0F47}, - RuneRange{0x0F49, 0x0F6A}, - RuneRange{0x0F7F, 0x0F7F}, - RuneRange{0x0F85, 0x0F85}, - RuneRange{0x0F88, 0x0F8B}, - RuneRange{0x0FBE, 0x0FC5}, - RuneRange{0x0FC7, 0x0FCC}, - RuneRange{0x0FCF, 0x0FCF}, - RuneRange{0x1000, 0x1021}, - RuneRange{0x1023, 0x1027}, - RuneRange{0x1029, 0x102A}, - RuneRange{0x102C, 0x102C}, - RuneRange{0x1031, 0x1031}, - RuneRange{0x1038, 0x1038}, - RuneRange{0x1040, 0x1057}, - RuneRange{0x10A0, 0x10C5}, - RuneRange{0x10D0, 0x10F8}, - RuneRange{0x10FB, 0x10FB}, - RuneRange{0x1100, 0x1159}, - RuneRange{0x115F, 0x11A2}, - RuneRange{0x11A8, 0x11F9}, - RuneRange{0x1200, 0x1206}, - RuneRange{0x1208, 0x1246}, - RuneRange{0x1248, 0x1248}, - RuneRange{0x124A, 
0x124D}, - RuneRange{0x1250, 0x1256}, - RuneRange{0x1258, 0x1258}, - RuneRange{0x125A, 0x125D}, - RuneRange{0x1260, 0x1286}, - RuneRange{0x1288, 0x1288}, - RuneRange{0x128A, 0x128D}, - RuneRange{0x1290, 0x12AE}, - RuneRange{0x12B0, 0x12B0}, - RuneRange{0x12B2, 0x12B5}, - RuneRange{0x12B8, 0x12BE}, - RuneRange{0x12C0, 0x12C0}, - RuneRange{0x12C2, 0x12C5}, - RuneRange{0x12C8, 0x12CE}, - RuneRange{0x12D0, 0x12D6}, - RuneRange{0x12D8, 0x12EE}, - RuneRange{0x12F0, 0x130E}, - RuneRange{0x1310, 0x1310}, - RuneRange{0x1312, 0x1315}, - RuneRange{0x1318, 0x131E}, - RuneRange{0x1320, 0x1346}, - RuneRange{0x1348, 0x135A}, - RuneRange{0x1361, 0x137C}, - RuneRange{0x13A0, 0x13F4}, - RuneRange{0x1401, 0x1676}, - RuneRange{0x1681, 0x169A}, - RuneRange{0x16A0, 0x16F0}, - RuneRange{0x1700, 0x170C}, - RuneRange{0x170E, 0x1711}, - RuneRange{0x1720, 0x1731}, - RuneRange{0x1735, 0x1736}, - RuneRange{0x1740, 0x1751}, - RuneRange{0x1760, 0x176C}, - RuneRange{0x176E, 0x1770}, - RuneRange{0x1780, 0x17B6}, - RuneRange{0x17BE, 0x17C5}, - RuneRange{0x17C7, 0x17C8}, - RuneRange{0x17D4, 0x17DA}, - RuneRange{0x17DC, 0x17DC}, - RuneRange{0x17E0, 0x17E9}, - RuneRange{0x1810, 0x1819}, - RuneRange{0x1820, 0x1877}, - RuneRange{0x1880, 0x18A8}, - RuneRange{0x1E00, 0x1E9B}, - RuneRange{0x1EA0, 0x1EF9}, - RuneRange{0x1F00, 0x1F15}, - RuneRange{0x1F18, 0x1F1D}, - RuneRange{0x1F20, 0x1F45}, - RuneRange{0x1F48, 0x1F4D}, - RuneRange{0x1F50, 0x1F57}, - RuneRange{0x1F59, 0x1F59}, - RuneRange{0x1F5B, 0x1F5B}, - RuneRange{0x1F5D, 0x1F5D}, - RuneRange{0x1F5F, 0x1F7D}, - RuneRange{0x1F80, 0x1FB4}, - RuneRange{0x1FB6, 0x1FBC}, - RuneRange{0x1FBE, 0x1FBE}, - RuneRange{0x1FC2, 0x1FC4}, - RuneRange{0x1FC6, 0x1FCC}, - RuneRange{0x1FD0, 0x1FD3}, - RuneRange{0x1FD6, 0x1FDB}, - RuneRange{0x1FE0, 0x1FEC}, - RuneRange{0x1FF2, 0x1FF4}, - RuneRange{0x1FF6, 0x1FFC}, - RuneRange{0x200E, 0x200E}, - RuneRange{0x2071, 0x2071}, - RuneRange{0x207F, 0x207F}, - RuneRange{0x2102, 0x2102}, - RuneRange{0x2107, 0x2107}, - 
RuneRange{0x210A, 0x2113}, - RuneRange{0x2115, 0x2115}, - RuneRange{0x2119, 0x211D}, - RuneRange{0x2124, 0x2124}, - RuneRange{0x2126, 0x2126}, - RuneRange{0x2128, 0x2128}, - RuneRange{0x212A, 0x212D}, - RuneRange{0x212F, 0x2131}, - RuneRange{0x2133, 0x2139}, - RuneRange{0x213D, 0x213F}, - RuneRange{0x2145, 0x2149}, - RuneRange{0x2160, 0x2183}, - RuneRange{0x2336, 0x237A}, - RuneRange{0x2395, 0x2395}, - RuneRange{0x249C, 0x24E9}, - RuneRange{0x3005, 0x3007}, - RuneRange{0x3021, 0x3029}, - RuneRange{0x3031, 0x3035}, - RuneRange{0x3038, 0x303C}, - RuneRange{0x3041, 0x3096}, - RuneRange{0x309D, 0x309F}, - RuneRange{0x30A1, 0x30FA}, - RuneRange{0x30FC, 0x30FF}, - RuneRange{0x3105, 0x312C}, - RuneRange{0x3131, 0x318E}, - RuneRange{0x3190, 0x31B7}, - RuneRange{0x31F0, 0x321C}, - RuneRange{0x3220, 0x3243}, - RuneRange{0x3260, 0x327B}, - RuneRange{0x327F, 0x32B0}, - RuneRange{0x32C0, 0x32CB}, - RuneRange{0x32D0, 0x32FE}, - RuneRange{0x3300, 0x3376}, - RuneRange{0x337B, 0x33DD}, - RuneRange{0x33E0, 0x33FE}, - RuneRange{0x3400, 0x4DB5}, - RuneRange{0x4E00, 0x9FA5}, - RuneRange{0xA000, 0xA48C}, - RuneRange{0xAC00, 0xD7A3}, - RuneRange{0xD800, 0xFA2D}, - RuneRange{0xFA30, 0xFA6A}, - RuneRange{0xFB00, 0xFB06}, - RuneRange{0xFB13, 0xFB17}, - RuneRange{0xFF21, 0xFF3A}, - RuneRange{0xFF41, 0xFF5A}, - RuneRange{0xFF66, 0xFFBE}, - RuneRange{0xFFC2, 0xFFC7}, - RuneRange{0xFFCA, 0xFFCF}, - RuneRange{0xFFD2, 0xFFD7}, - RuneRange{0xFFDA, 0xFFDC}, - RuneRange{0x10300, 0x1031E}, - RuneRange{0x10320, 0x10323}, - RuneRange{0x10330, 0x1034A}, - RuneRange{0x10400, 0x10425}, - RuneRange{0x10428, 0x1044D}, - RuneRange{0x1D000, 0x1D0F5}, - RuneRange{0x1D100, 0x1D126}, - RuneRange{0x1D12A, 0x1D166}, - RuneRange{0x1D16A, 0x1D172}, - RuneRange{0x1D183, 0x1D184}, - RuneRange{0x1D18C, 0x1D1A9}, - RuneRange{0x1D1AE, 0x1D1DD}, - RuneRange{0x1D400, 0x1D454}, - RuneRange{0x1D456, 0x1D49C}, - RuneRange{0x1D49E, 0x1D49F}, - RuneRange{0x1D4A2, 0x1D4A2}, - RuneRange{0x1D4A5, 0x1D4A6}, - RuneRange{0x1D4A9, 
0x1D4AC}, - RuneRange{0x1D4AE, 0x1D4B9}, - RuneRange{0x1D4BB, 0x1D4BB}, - RuneRange{0x1D4BD, 0x1D4C0}, - RuneRange{0x1D4C2, 0x1D4C3}, - RuneRange{0x1D4C5, 0x1D505}, - RuneRange{0x1D507, 0x1D50A}, - RuneRange{0x1D50D, 0x1D514}, - RuneRange{0x1D516, 0x1D51C}, - RuneRange{0x1D51E, 0x1D539}, - RuneRange{0x1D53B, 0x1D53E}, - RuneRange{0x1D540, 0x1D544}, - RuneRange{0x1D546, 0x1D546}, - RuneRange{0x1D54A, 0x1D550}, - RuneRange{0x1D552, 0x1D6A3}, - RuneRange{0x1D6A8, 0x1D7C9}, - RuneRange{0x20000, 0x2A6D6}, - RuneRange{0x2F800, 0x2FA1D}, - RuneRange{0xF0000, 0xFFFFD}, - RuneRange{0x100000, 0x10FFFD}, -} - -// TableD2 represents RFC-3454 Table D.2. -var TableD2 Set = tableD2 diff --git a/backend/services/controller/vendor/github.com/youmark/pkcs8/.gitignore b/backend/services/controller/vendor/github.com/youmark/pkcs8/.gitignore deleted file mode 100644 index 8365624..0000000 --- a/backend/services/controller/vendor/github.com/youmark/pkcs8/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/backend/services/controller/vendor/github.com/youmark/pkcs8/.travis.yml b/backend/services/controller/vendor/github.com/youmark/pkcs8/.travis.yml deleted file mode 100644 index 0bceef6..0000000 --- a/backend/services/controller/vendor/github.com/youmark/pkcs8/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - "1.9.x" - - "1.10.x" - - master - -script: - - go test -v ./... 
diff --git a/backend/services/controller/vendor/github.com/youmark/pkcs8/LICENSE b/backend/services/controller/vendor/github.com/youmark/pkcs8/LICENSE deleted file mode 100644 index c939f44..0000000 --- a/backend/services/controller/vendor/github.com/youmark/pkcs8/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 youmark - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/backend/services/controller/vendor/github.com/youmark/pkcs8/README b/backend/services/controller/vendor/github.com/youmark/pkcs8/README deleted file mode 100644 index 376fcaf..0000000 --- a/backend/services/controller/vendor/github.com/youmark/pkcs8/README +++ /dev/null @@ -1 +0,0 @@ -pkcs8 package: implement PKCS#8 private key parsing and conversion as defined in RFC5208 and RFC5958 diff --git a/backend/services/controller/vendor/github.com/youmark/pkcs8/README.md b/backend/services/controller/vendor/github.com/youmark/pkcs8/README.md deleted file mode 100644 index f2167db..0000000 --- a/backend/services/controller/vendor/github.com/youmark/pkcs8/README.md +++ /dev/null @@ -1,21 +0,0 @@ -pkcs8 -=== -OpenSSL can generate private keys in both "traditional format" and PKCS#8 format. Newer applications are advised to use more secure PKCS#8 format. Go standard crypto package provides a [function](http://golang.org/pkg/crypto/x509/#ParsePKCS8PrivateKey) to parse private key in PKCS#8 format. There is a limitation to this function. It can only handle unencrypted PKCS#8 private keys. To use this function, the user has to save the private key in file without encryption, which is a bad practice to leave private keys unprotected on file systems. In addition, Go standard package lacks the functions to convert RSA/ECDSA private keys into PKCS#8 format. - -pkcs8 package fills the gap here. It implements functions to process private keys in PKCS#8 format, as defined in [RFC5208](https://tools.ietf.org/html/rfc5208) and [RFC5958](https://tools.ietf.org/html/rfc5958). It can handle both unencrypted PKCS#8 PrivateKeyInfo format and EncryptedPrivateKeyInfo format with PKCS#5 (v2.0) algorithms. - - -[**Godoc**](http://godoc.org/github.com/youmark/pkcs8) - -## Installation -Supports Go 1.9+ - -```text -go get github.com/youmark/pkcs8 -``` -## dependency -This package depends on golang.org/x/crypto/pbkdf2 package. 
Use the following command to retrive pbkdf2 package -```text -go get golang.org/x/crypto/pbkdf2 -``` - diff --git a/backend/services/controller/vendor/github.com/youmark/pkcs8/pkcs8.go b/backend/services/controller/vendor/github.com/youmark/pkcs8/pkcs8.go deleted file mode 100644 index 9270a79..0000000 --- a/backend/services/controller/vendor/github.com/youmark/pkcs8/pkcs8.go +++ /dev/null @@ -1,305 +0,0 @@ -// Package pkcs8 implements functions to parse and convert private keys in PKCS#8 format, as defined in RFC5208 and RFC5958 -package pkcs8 - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "errors" - - "golang.org/x/crypto/pbkdf2" -) - -// Copy from crypto/x509 -var ( - oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} - oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} - oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} -) - -// Copy from crypto/x509 -var ( - oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} - oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} - oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} - oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} -) - -// Copy from crypto/x509 -func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { - switch curve { - case elliptic.P224(): - return oidNamedCurveP224, true - case elliptic.P256(): - return oidNamedCurveP256, true - case elliptic.P384(): - return oidNamedCurveP384, true - case elliptic.P521(): - return oidNamedCurveP521, true - } - - return nil, false -} - -// Unecrypted PKCS8 -var ( - oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} - oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} - oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 
1, 101, 3, 4, 1, 2} - oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} - oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} -) - -type ecPrivateKey struct { - Version int - PrivateKey []byte - NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` - PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` -} - -type privateKeyInfo struct { - Version int - PrivateKeyAlgorithm []asn1.ObjectIdentifier - PrivateKey []byte -} - -// Encrypted PKCS8 -type prfParam struct { - IdPRF asn1.ObjectIdentifier - NullParam asn1.RawValue -} - -type pbkdf2Params struct { - Salt []byte - IterationCount int - PrfParam prfParam `asn1:"optional"` -} - -type pbkdf2Algorithms struct { - IdPBKDF2 asn1.ObjectIdentifier - PBKDF2Params pbkdf2Params -} - -type pbkdf2Encs struct { - EncryAlgo asn1.ObjectIdentifier - IV []byte -} - -type pbes2Params struct { - KeyDerivationFunc pbkdf2Algorithms - EncryptionScheme pbkdf2Encs -} - -type pbes2Algorithms struct { - IdPBES2 asn1.ObjectIdentifier - PBES2Params pbes2Params -} - -type encryptedPrivateKeyInfo struct { - EncryptionAlgorithm pbes2Algorithms - EncryptedData []byte -} - -// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) - if err != nil { - return nil, err - } - typedKey, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("key block is not of type RSA") - } - return typedKey, nil -} - -// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. 
-// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) - if err != nil { - return nil, err - } - typedKey, ok := key.(*ecdsa.PrivateKey) - if !ok { - return nil, errors.New("key block is not of type ECDSA") - } - return typedKey, nil -} - -// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -// -// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. -func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) { - // No password provided, assume the private key is unencrypted - if v == nil { - return x509.ParsePKCS8PrivateKey(der) - } - - // Use the password provided to decrypt the private key - password := v[0] - var privKey encryptedPrivateKeyInfo - if _, err := asn1.Unmarshal(der, &privKey); err != nil { - return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") - } - - if !privKey.EncryptionAlgorithm.IdPBES2.Equal(oidPBES2) { - return nil, errors.New("pkcs8: only PBES2 supported") - } - - if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IdPBKDF2.Equal(oidPKCS5PBKDF2) { - return nil, errors.New("pkcs8: only PBKDF2 supported") - } - - encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme - kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params - - iv := encParam.IV - salt := kdfParam.Salt - iter := kdfParam.IterationCount - keyHash := sha1.New - if kdfParam.PrfParam.IdPRF.Equal(oidHMACWithSHA256) { - keyHash = sha256.New - } - - encryptedKey := privKey.EncryptedData - var symkey []byte - var block cipher.Block - var err error - switch { - case encParam.EncryAlgo.Equal(oidAES128CBC): - symkey = 
pbkdf2.Key(password, salt, iter, 16, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidAES256CBC): - symkey = pbkdf2.Key(password, salt, iter, 32, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidDESEDE3CBC): - symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) - block, err = des.NewTripleDESCipher(symkey) - default: - return nil, errors.New("pkcs8: only AES-256-CBC, AES-128-CBC and DES-EDE3-CBC are supported") - } - if err != nil { - return nil, err - } - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(encryptedKey, encryptedKey) - - key, err := x509.ParsePKCS8PrivateKey(encryptedKey) - if err != nil { - return nil, errors.New("pkcs8: incorrect password") - } - return key, nil -} - -func convertPrivateKeyToPKCS8(priv interface{}) ([]byte, error) { - var pkey privateKeyInfo - - switch priv := priv.(type) { - case *ecdsa.PrivateKey: - eckey, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return nil, err - } - - oidNamedCurve, ok := oidFromNamedCurve(priv.Curve) - if !ok { - return nil, errors.New("pkcs8: unknown elliptic curve") - } - - // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). - // But openssl set to v1 even publicKey is present - pkey.Version = 1 - pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) - pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA - pkey.PrivateKeyAlgorithm[1] = oidNamedCurve - pkey.PrivateKey = eckey - case *rsa.PrivateKey: - - // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). 
- // But openssl set to v1 even publicKey is present - pkey.Version = 0 - pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) - pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA - pkey.PrivateKey = x509.MarshalPKCS1PrivateKey(priv) - } - - return asn1.Marshal(pkey) -} - -func convertPrivateKeyToPKCS8Encrypted(priv interface{}, password []byte) ([]byte, error) { - // Convert private key into PKCS8 format - pkey, err := convertPrivateKeyToPKCS8(priv) - if err != nil { - return nil, err - } - - // Calculate key from password based on PKCS5 algorithm - // Use 8 byte salt, 16 byte IV, and 2048 iteration - iter := 2048 - salt := make([]byte, 8) - iv := make([]byte, 16) - _, err = rand.Read(salt) - if err != nil { - return nil, err - } - _, err = rand.Read(iv) - if err != nil { - return nil, err - } - - key := pbkdf2.Key(password, salt, iter, 32, sha256.New) - - // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme - padding := aes.BlockSize - len(pkey)%aes.BlockSize - if padding > 0 { - n := len(pkey) - pkey = append(pkey, make([]byte, padding)...) - for i := 0; i < padding; i++ { - pkey[n+i] = byte(padding) - } - } - - encryptedKey := make([]byte, len(pkey)) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - mode := cipher.NewCBCEncrypter(block, iv) - mode.CryptBlocks(encryptedKey, pkey) - - // pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256}}} - - pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256, asn1.RawValue{Tag: asn1.TagNull}}}} - pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} - pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} - - encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} - - return asn1.Marshal(encryptedPkey) -} - -// ConvertPrivateKeyToPKCS8 converts the private key into PKCS#8 format. 
-// To encrypt the private key, the password of []byte type should be provided as the second parameter. -// -// The only supported key types are RSA and ECDSA (*rsa.PublicKey or *ecdsa.PublicKey for priv) -func ConvertPrivateKeyToPKCS8(priv interface{}, v ...[]byte) ([]byte, error) { - if v == nil { - return convertPrivateKeyToPKCS8(priv) - } - - password := string(v[0]) - return convertPrivateKeyToPKCS8Encrypted(priv, []byte(password)) -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/LICENSE b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bson.go deleted file mode 100644 index a0d8185..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer -// See THIRD-PARTY-NOTICES for original license terms. - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. 
-// -// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D = primitive.D - -// E represents a BSON element for a D. It is usually used inside a D. -type E = primitive.E - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. -// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M = primitive.M - -// An A is an ordered representation of a BSON array. -// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A = primitive.A diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go deleted file mode 100644 index 4e24f9e..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ArrayCodec is the Codec used for bsoncore.Array values. -type ArrayCodec struct{} - -var defaultArrayCodec = NewArrayCodec() - -// NewArrayCodec returns an ArrayCodec. 
-func NewArrayCodec() *ArrayCodec { - return &ArrayCodec{} -} - -// EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreArray { - return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - arr := val.Interface().(bsoncore.Array) - return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) -} - -// DecodeValue is the ValueDecoder for bsoncore.Array values. -func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreArray { - return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) - val.Set(reflect.ValueOf(arr)) - return err -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go deleted file mode 100644 index 098ed69..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" - -import ( - "fmt" - "reflect" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ( - emptyValue = reflect.Value{} -) - -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -type Marshaler interface { - MarshalBSON() ([]byte, error) -} - -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. -type ValueMarshaler interface { - MarshalBSONValue() (bsontype.Type, []byte, error) -} - -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. -type Unmarshaler interface { - UnmarshalBSON([]byte) error -} - -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -type ValueUnmarshaler interface { - UnmarshalBSONValue(bsontype.Type, []byte) error -} - -// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be -// encoded by the ValueEncoder. 
-type ValueEncoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vee ValueEncoderError) Error() string { - typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) - for _, t := range vee.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vee.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vee.Received.Kind().String() - if vee.Received.IsValid() { - received = vee.Received.Type().String() - } - return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) -} - -// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be -// decoded by the ValueDecoder. -type ValueDecoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vde ValueDecoderError) Error() string { - typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) - for _, t := range vde.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vde.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vde.Received.Kind().String() - if vde.Received.IsValid() { - received = vde.Received.Type().String() - } - return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) -} - -// EncodeContext is the contextual information required for a Codec to encode a -// value. -type EncodeContext struct { - *Registry - MinSize bool -} - -// DecodeContext is the contextual information required for a Codec to decode a -// value. -type DecodeContext struct { - *Registry - Truncate bool - - // Ancestor is the type of a containing document. 
This is mainly used to determine what type - // should be used when decoding an embedded document into an empty interface. For example, if - // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface - // will be decoded into a bson.M. - // - // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. - Ancestor reflect.Type - - // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the - // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is - // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an - // error. DocumentType overrides the Ancestor field. - defaultDocumentType reflect.Type -} - -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". -func (dc *DecodeContext) DefaultDocumentM() { - dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) -} - -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". -func (dc *DecodeContext) DefaultDocumentD() { - dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) -} - -// ValueCodec is the interface that groups the methods to encode and decode -// values. -type ValueCodec interface { - ValueEncoder - ValueDecoder -} - -// ValueEncoder is the interface implemented by types that can handle the encoding of a value. -type ValueEncoder interface { - EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error -} - -// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueEncoder. -type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error - -// EncodeValue implements the ValueEncoder interface. 
-func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - return fn(ec, vw, val) -} - -// ValueDecoder is the interface implemented by types that can handle the decoding of a value. -type ValueDecoder interface { - DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error -} - -// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueDecoder. -type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error - -// DecodeValue implements the ValueDecoder interface. -func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - return fn(dc, vr, val) -} - -// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type. -type typeDecoder interface { - decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) -} - -// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder. -type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) - -func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - return fn(dc, vr, t) -} - -// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder. -type decodeAdapter struct { - ValueDecoderFunc - typeDecoderFunc -} - -var _ ValueDecoder = decodeAdapter{} -var _ typeDecoder = decodeAdapter{} - -// decodeTypeOrValue calls decoder.decodeType is decoder is a typeDecoder. Otherwise, it allocates a new element of type -// t and calls decoder.DecodeValue on it. 
-func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - td, _ := decoder.(typeDecoder) - return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true) -} - -func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) { - if td != nil { - val, err := td.decodeType(dc, vr, t) - if err == nil && convert && val.Type() != t { - // This conversion step is necessary for slices and maps. If a user declares variables like: - // - // type myBool bool - // var m map[string]myBool - // - // and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present - // because we'll try to assign a value of type bool to one of type myBool. - val = val.Convert(t) - } - return val, err - } - - val := reflect.New(t).Elem() - err := vd.DecodeValue(dc, vr, val) - return val, err -} - -// CodecZeroer is the interface implemented by Codecs that can also determine if -// a value of the type that would be encoded is zero. -type CodecZeroer interface { - IsTypeZero(interface{}) bool -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go deleted file mode 100644 index 5a916cc..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ByteSliceCodec is the Codec used for []byte values. -type ByteSliceCodec struct { - EncodeNilAsEmpty bool -} - -var ( - defaultByteSliceCodec = NewByteSliceCodec() - - _ ValueCodec = defaultByteSliceCodec - _ typeDecoder = defaultByteSliceCodec -) - -// NewByteSliceCodec returns a StringCodec with options opts. -func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { - byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) - codec := ByteSliceCodec{} - if byteSliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for []byte. -func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() && !bsc.EncodeNilAsEmpty { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tByteSlice { - return emptyValue, ValueDecoderError{ - Name: "ByteSliceDecodeValue", - Types: []reflect.Type{tByteSlice}, - Received: reflect.Zero(t), - } - } - - var data []byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - data = []byte(str) - case bsontype.Symbol: - sym, err := vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - data = []byte(sym) - case bsontype.Binary: - var 
subtype byte - data, subtype, err = vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} - } - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(data), nil -} - -// DecodeValue is the ValueDecoder for []byte. -func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - elem, err := bsc.decodeType(dc, vr, tByteSlice) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go deleted file mode 100644 index cb8180f..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" -) - -// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. 
-type condAddrEncoder struct { - canAddrEnc ValueEncoder - elseEnc ValueEncoder -} - -var _ ValueEncoder = (*condAddrEncoder)(nil) - -// newCondAddrEncoder returns an condAddrEncoder. -func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { - encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} - return &encoder -} - -// EncodeValue is the ValueEncoderFunc for a value that may be addressable. -func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.CanAddr() { - return cae.canAddrEnc.EncodeValue(ec, vw, val) - } - if cae.elseEnc != nil { - return cae.elseEnc.EncodeValue(ec, vw, val) - } - return ErrNoEncoder{Type: val.Type()} -} - -// condAddrDecoder is the decoder used when a pointer to the value has a decoder. -type condAddrDecoder struct { - canAddrDec ValueDecoder - elseDec ValueDecoder -} - -var _ ValueDecoder = (*condAddrDecoder)(nil) - -// newCondAddrDecoder returns an CondAddrDecoder. -func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { - decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} - return &decoder -} - -// DecodeValue is the ValueDecoderFunc for a value that may be addressable. 
-func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.CanAddr() { - return cad.canAddrDec.DecodeValue(dc, vr, val) - } - if cad.elseDec != nil { - return cad.elseDec.DecodeValue(dc, vr, val) - } - return ErrNoDecoder{Type: val.Type()} -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go deleted file mode 100644 index e95cab5..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ /dev/null @@ -1,1729 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "strconv" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var ( - defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled") -) - -type decodeBinaryError struct { - subtype byte - typeName string -} - -func (d decodeBinaryError) Error() string { - return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype) -} - -func newDefaultStructCodec() *StructCodec { - codec, err := NewStructCodec(DefaultStructTagParser) - if err != nil { - // This function is called from the codec registration path, so errors can't be propagated. 
If there's an error - // constructing the StructCodec, we panic to avoid losing it. - panic(fmt.Errorf("error creating default StructCodec: %v", err)) - } - return codec -} - -// DefaultValueDecoders is a namespace type for the default ValueDecoders used -// when creating a registry. -type DefaultValueDecoders struct{} - -// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with -// the provided RegistryBuilder. -// -// There is no support for decoding map[string]interface{} because there is no decoder for -// interface{}, so users must either register this decoder themselves or use the -// EmptyInterfaceDecoder available in the bson package. -func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) - } - - intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} - floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} - - rb. - RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). - RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). - RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). - RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). - RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). - RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). - RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). - RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). - RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). - RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). 
- RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). - RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). - RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeDecoder(tTime, defaultTimeCodec). - RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeDecoder(tCoreArray, defaultArrayCodec). - RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). - RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). - RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). - RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). - RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). - RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). - RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). - RegisterDefaultDecoder(reflect.Int, intDecoder). - RegisterDefaultDecoder(reflect.Int8, intDecoder). - RegisterDefaultDecoder(reflect.Int16, intDecoder). - RegisterDefaultDecoder(reflect.Int32, intDecoder). - RegisterDefaultDecoder(reflect.Int64, intDecoder). - RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Float32, floatDecoder). - RegisterDefaultDecoder(reflect.Float64, floatDecoder). - RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). - RegisterDefaultDecoder(reflect.Map, defaultMapCodec). - RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). 
- RegisterDefaultDecoder(reflect.String, defaultStringCodec). - RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). - RegisterTypeMapEntry(bsontype.Double, tFloat64). - RegisterTypeMapEntry(bsontype.String, tString). - RegisterTypeMapEntry(bsontype.Array, tA). - RegisterTypeMapEntry(bsontype.Binary, tBinary). - RegisterTypeMapEntry(bsontype.Undefined, tUndefined). - RegisterTypeMapEntry(bsontype.ObjectID, tOID). - RegisterTypeMapEntry(bsontype.Boolean, tBool). - RegisterTypeMapEntry(bsontype.DateTime, tDateTime). - RegisterTypeMapEntry(bsontype.Regex, tRegex). - RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). - RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). - RegisterTypeMapEntry(bsontype.Symbol, tSymbol). - RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). - RegisterTypeMapEntry(bsontype.Int32, tInt32). - RegisterTypeMapEntry(bsontype.Int64, tInt64). - RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). - RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). - RegisterTypeMapEntry(bsontype.MinKey, tMinKey). - RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). - RegisterTypeMapEntry(bsontype.Type(0), tD). - RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). - RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). - RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) -} - -// DDecodeValue is the ValueDecoderFunc for primitive.D instances. 
func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	// val must be a settable primitive.D; Kinds reports Slice because primitive.D is a slice type.
	if !val.IsValid() || !val.CanSet() || val.Type() != tD {
		return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
	}

	switch vrType := vr.Type(); vrType {
	case bsontype.Type(0), bsontype.EmbeddedDocument:
		// Record tD as the ancestor so nested interface{} elements are decoded as primitive.D too.
		dc.Ancestor = tD
	case bsontype.Null:
		val.Set(reflect.Zero(val.Type()))
		return vr.ReadNull()
	default:
		return fmt.Errorf("cannot decode %v into a primitive.D", vrType)
	}

	dr, err := vr.ReadDocument()
	if err != nil {
		return err
	}

	// Look up the interface{} decoder once and reuse it for every element.
	decoder, err := dc.LookupDecoder(tEmpty)
	if err != nil {
		return err
	}
	tEmptyTypeDecoder, _ := decoder.(typeDecoder)

	// Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance.
	var elems primitive.D
	if !val.IsNil() {
		val.SetLen(0)
		elems = val.Interface().(primitive.D)
	} else {
		elems = make(primitive.D, 0)
	}

	// Read key/value pairs until end-of-document, preserving element order.
	for {
		key, elemVr, err := dr.ReadElement()
		if err == bsonrw.ErrEOD {
			break
		} else if err != nil {
			return err
		}

		// Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty.
		elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false)
		if err != nil {
			return err
		}

		elems = append(elems, primitive.E{Key: key, Value: elem.Interface()})
	}

	val.Set(reflect.ValueOf(elems))
	return nil
}

// booleanDecodeType decodes the next BSON value from vr into a reflect.Value of kind Bool.
// Numeric BSON types coerce to true when non-zero; Null/Undefined yield false.
func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
	if t.Kind() != reflect.Bool {
		return emptyValue, ValueDecoderError{
			Name:     "BooleanDecodeValue",
			Kinds:    []reflect.Kind{reflect.Bool},
			Received: reflect.Zero(t),
		}
	}

	var b bool
	var err error
	switch vrType := vr.Type(); vrType {
	case bsontype.Int32:
		i32, err := vr.ReadInt32()
		if err != nil {
			return emptyValue, err
		}
		b = (i32 != 0)
	case bsontype.Int64:
		i64, err := vr.ReadInt64()
		if err != nil {
			return emptyValue, err
		}
		b = (i64 != 0)
	case bsontype.Double:
		f64, err := vr.ReadDouble()
		if err != nil {
			return emptyValue, err
		}
		b = (f64 != 0)
	case bsontype.Boolean:
		b, err = vr.ReadBoolean()
	case bsontype.Null:
		err = vr.ReadNull()
	case bsontype.Undefined:
		err = vr.ReadUndefined()
	default:
		return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType)
	}
	if err != nil {
		return emptyValue, err
	}

	return reflect.ValueOf(b), nil
}

// BooleanDecodeValue is the ValueDecoderFunc for bool types.
-func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { - return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - - elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetBool(elem.Bool()) - return nil -} - -func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Int8: - if i64 < math.MinInt8 || i64 > math.MaxInt8 { - return emptyValue, fmt.Errorf("%d overflows int8", i64) - } - - return reflect.ValueOf(int8(i64)), nil - case reflect.Int16: - if i64 < math.MinInt16 || i64 > math.MaxInt16 { - return emptyValue, fmt.Errorf("%d overflows int16", i64) - } - - return reflect.ValueOf(int16(i64)), nil - case reflect.Int32: - if i64 < math.MinInt32 || 
i64 > math.MaxInt32 { - return emptyValue, fmt.Errorf("%d overflows int32", i64) - } - - return reflect.ValueOf(int32(i64)), nil - case reflect.Int64: - return reflect.ValueOf(i64), nil - case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int - return emptyValue, fmt.Errorf("%d overflows int", i64) - } - - return reflect.ValueOf(int(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: reflect.Zero(t), - } - } -} - -// IntDecodeValue is the ValueDecoderFunc for int types. -func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } - } - - elem, err := dvd.intDecodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetInt(elem.Int()) - return nil -} - -// UintDecodeValue is the ValueDecoderFunc for uint types. -// -// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. 
func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	var i64 int64
	var err error
	switch vr.Type() {
	case bsontype.Int32:
		i32, err := vr.ReadInt32()
		if err != nil {
			return err
		}
		i64 = int64(i32)
	case bsontype.Int64:
		i64, err = vr.ReadInt64()
		if err != nil {
			return err
		}
	case bsontype.Double:
		f64, err := vr.ReadDouble()
		if err != nil {
			return err
		}
		if !dc.Truncate && math.Floor(f64) != f64 {
			return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled")
		}
		// NOTE(review): no guard against f64 < math.MinInt64 here, and
		// f64 == 2^63 slips past `>` — same edge as intDecodeType; confirm
		// whether this deprecated path should be hardened.
		if f64 > float64(math.MaxInt64) {
			return fmt.Errorf("%g overflows int64", f64)
		}
		i64 = int64(f64)
	case bsontype.Boolean:
		b, err := vr.ReadBoolean()
		if err != nil {
			return err
		}
		if b {
			i64 = 1
		}
	default:
		return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
	}

	// The value has already been consumed from vr before this check, so an
	// unsettable val still advances the reader.
	if !val.CanSet() {
		return ValueDecoderError{
			Name:     "UintDecodeValue",
			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
			Received: val,
		}
	}

	// Range-check the signed intermediate against the destination unsigned kind.
	switch val.Kind() {
	case reflect.Uint8:
		if i64 < 0 || i64 > math.MaxUint8 {
			return fmt.Errorf("%d overflows uint8", i64)
		}
	case reflect.Uint16:
		if i64 < 0 || i64 > math.MaxUint16 {
			return fmt.Errorf("%d overflows uint16", i64)
		}
	case reflect.Uint32:
		if i64 < 0 || i64 > math.MaxUint32 {
			return fmt.Errorf("%d overflows uint32", i64)
		}
	case reflect.Uint64:
		if i64 < 0 {
			return fmt.Errorf("%d overflows uint64", i64)
		}
	case reflect.Uint:
		if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
			return fmt.Errorf("%d overflows uint", i64)
		}
	default:
		return ValueDecoderError{
			Name:     "UintDecodeValue",
			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
			Received: val,
		}
	}

	val.SetUint(uint64(i64))
	return nil
}

// floatDecodeType decodes the next BSON value from vr into a float32/float64
// reflect.Value of type t. Integer and boolean BSON values are widened;
// float64→float32 narrowing errors unless ec.Truncate is set or lossless.
func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
	var f float64
	var err error
	switch vrType := vr.Type(); vrType {
	case bsontype.Int32:
		i32, err := vr.ReadInt32()
		if err != nil {
			return emptyValue, err
		}
		f = float64(i32)
	case bsontype.Int64:
		i64, err := vr.ReadInt64()
		if err != nil {
			return emptyValue, err
		}
		f = float64(i64)
	case bsontype.Double:
		f, err = vr.ReadDouble()
		if err != nil {
			return emptyValue, err
		}
	case bsontype.Boolean:
		b, err := vr.ReadBoolean()
		if err != nil {
			return emptyValue, err
		}
		if b {
			f = 1
		}
	case bsontype.Null:
		if err = vr.ReadNull(); err != nil {
			return emptyValue, err
		}
	case bsontype.Undefined:
		if err = vr.ReadUndefined(); err != nil {
			return emptyValue, err
		}
	default:
		return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType)
	}

	switch t.Kind() {
	case reflect.Float32:
		// Round-trip check: only narrow when no precision is lost, unless truncation is enabled.
		if !ec.Truncate && float64(float32(f)) != f {
			return emptyValue, errCannotTruncate
		}

		return reflect.ValueOf(float32(f)), nil
	case reflect.Float64:
		return reflect.ValueOf(f), nil
	default:
		return emptyValue, ValueDecoderError{
			Name:     "FloatDecodeValue",
			Kinds:    []reflect.Kind{reflect.Float32, reflect.Float64},
			Received: reflect.Zero(t),
		}
	}
}

// FloatDecodeValue is the ValueDecoderFunc for float types.
func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() {
		return ValueDecoderError{
			Name:     "FloatDecodeValue",
			Kinds:    []reflect.Kind{reflect.Float32, reflect.Float64},
			Received: val,
		}
	}

	elem, err := dvd.floatDecodeType(ec, vr, val.Type())
	if err != nil {
		return err
	}

	val.SetFloat(elem.Float())
	return nil
}

// StringDecodeValue is the ValueDecoderFunc for string types.
//
// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead.
-func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var str string - var err error - switch vr.Type() { - // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return err - } - default: - return fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - val.SetString(str) - return nil -} - -func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJavaScript { - return emptyValue, ValueDecoderError{ - Name: "JavaScriptDecodeValue", - Types: []reflect.Type{tJavaScript}, - Received: reflect.Zero(t), - } - } - - var js string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.JavaScript: - js, err = vr.ReadJavascript() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.JavaScript(js)), nil -} - -// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. 
-func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJavaScript { - return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tSymbol { - return emptyValue, ValueDecoderError{ - Name: "SymbolDecodeValue", - Types: []reflect.Type{tSymbol}, - Received: reflect.Zero(t), - } - } - - var symbol string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - symbol, err = vr.ReadString() - case bsontype.Symbol: - symbol, err = vr.ReadSymbol() - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} - } - symbol = string(data) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Symbol(symbol)), nil -} - -// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. 
-func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tSymbol { - return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tBinary { - return emptyValue, ValueDecoderError{ - Name: "BinaryDecodeValue", - Types: []reflect.Type{tBinary}, - Received: reflect.Zero(t), - } - } - - var data []byte - var subtype byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Binary: - data, subtype, err = vr.ReadBinary() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil -} - -// BinaryDecodeValue is the ValueDecoderFunc for Binary. 
-func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tBinary { - return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - - elem, err := dvd.binaryDecodeType(dc, vr, tBinary) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tUndefined { - return emptyValue, ValueDecoderError{ - Name: "UndefinedDecodeValue", - Types: []reflect.Type{tUndefined}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Undefined{}), nil -} - -// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. -func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tUndefined { - return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// Accept both 12-byte string and pretty-printed 24-byte hex string formats. 
-func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tOID { - return emptyValue, ValueDecoderError{ - Name: "ObjectIDDecodeValue", - Types: []reflect.Type{tOID}, - Received: reflect.Zero(t), - } - } - - var oid primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.ObjectID: - oid, err = vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - if oid, err = primitive.ObjectIDFromHex(str); err == nil { - break - } - if len(str) != 12 { - return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) - } - byteArr := []byte(str) - copy(oid[:], byteArr) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) - } - - return reflect.ValueOf(oid), nil -} - -// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. 
-func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tOID { - return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} - } - - elem, err := dvd.objectIDDecodeType(dc, vr, tOID) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDateTime { - return emptyValue, ValueDecoderError{ - Name: "DateTimeDecodeValue", - Types: []reflect.Type{tDateTime}, - Received: reflect.Zero(t), - } - } - - var dt int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err = vr.ReadDateTime() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DateTime(dt)), nil -} - -// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. 
-func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDateTime { - return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tNull { - return emptyValue, ValueDecoderError{ - Name: "NullDecodeValue", - Types: []reflect.Type{tNull}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Null{}), nil -} - -// NullDecodeValue is the ValueDecoderFunc for Null. 
-func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tNull { - return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - elem, err := dvd.nullDecodeType(dc, vr, tNull) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tRegex { - return emptyValue, ValueDecoderError{ - Name: "RegexDecodeValue", - Types: []reflect.Type{tRegex}, - Received: reflect.Zero(t), - } - } - - var pattern, options string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Regex: - pattern, options, err = vr.ReadRegex() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil -} - -// RegexDecodeValue is the ValueDecoderFunc for Regex. 
-func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tRegex { - return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - elem, err := dvd.regexDecodeType(dc, vr, tRegex) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDBPointer { - return emptyValue, ValueDecoderError{ - Name: "DBPointerDecodeValue", - Types: []reflect.Type{tDBPointer}, - Received: reflect.Zero(t), - } - } - - var ns string - var pointer primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DBPointer: - ns, pointer, err = vr.ReadDBPointer() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil -} - -// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. 
-func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDBPointer { - return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { - if reflectType != tTimestamp { - return emptyValue, ValueDecoderError{ - Name: "TimestampDecodeValue", - Types: []reflect.Type{tTimestamp}, - Received: reflect.Zero(reflectType), - } - } - - var t, incr uint32 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Timestamp: - t, incr, err = vr.ReadTimestamp() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil -} - -// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. 
-func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTimestamp { - return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMinKey { - return emptyValue, ValueDecoderError{ - Name: "MinKeyDecodeValue", - Types: []reflect.Type{tMinKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MinKey: - err = vr.ReadMinKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MinKey{}), nil -} - -// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. 
-func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMinKey { - return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMaxKey { - return emptyValue, ValueDecoderError{ - Name: "MaxKeyDecodeValue", - Types: []reflect.Type{tMaxKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MaxKey: - err = vr.ReadMaxKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MaxKey{}), nil -} - -// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. 
-func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMaxKey { - return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDecimal { - return emptyValue, ValueDecoderError{ - Name: "Decimal128DecodeValue", - Types: []reflect.Type{tDecimal}, - Received: reflect.Zero(t), - } - } - - var d128 primitive.Decimal128 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Decimal128: - d128, err = vr.ReadDecimal128() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(d128), nil -} - -// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. 
-func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDecimal { - return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - - elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJSONNumber { - return emptyValue, ValueDecoderError{ - Name: "JSONNumberDecodeValue", - Types: []reflect.Type{tJSONNumber}, - Received: reflect.Zero(t), - } - } - - var jsonNum json.Number - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(i64, 10)) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(jsonNum), nil -} - -// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. 
-func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJSONNumber { - return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - - elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tURL { - return emptyValue, ValueDecoderError{ - Name: "URLDecodeValue", - Types: []reflect.Type{tURL}, - Received: reflect.Zero(t), - } - } - - urlPtr := &url.URL{} - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - var str string // Declare str here to avoid shadowing err during the ReadString call. - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - - urlPtr, err = url.Parse(str) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(urlPtr).Elem(), nil -} - -// URLDecodeValue is the ValueDecoderFunc for url.URL. -func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tURL { - return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} - } - - elem, err := dvd.urlDecodeType(dc, vr, tURL) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// TimeDecodeValue is the ValueDecoderFunc for time.Time. -// -// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. 
-func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.DateTime { - return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) - } - - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) - return nil -} - -// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. -// -// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { - return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) - } - - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != 0x00 { - return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) - } - - val.Set(reflect.ValueOf(data)) - return nil -} - -// MapDecodeValue is the ValueDecoderFunc for map[string]* types. -// -// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. 
-func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - elem := reflect.New(eType).Elem() - - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) - } - return nil -} - -// ArrayDecodeValue is the ValueDecoderFunc for array types. 
func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.IsValid() || val.Kind() != reflect.Array {
		return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
	}

	switch vrType := vr.Type(); vrType {
	case bsontype.Array:
	case bsontype.Type(0), bsontype.EmbeddedDocument:
		// A document can only decode into an array of primitive.E elements.
		if val.Type().Elem() != tE {
			return fmt.Errorf("cannot decode document into %s", val.Type())
		}
	case bsontype.Binary:
		// Binary data decodes element-wise into a byte array; handled inline
		// and returns without falling through to the generic element path.
		if val.Type().Elem() != tByte {
			return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType)
		}
		data, subtype, err := vr.ReadBinary()
		if err != nil {
			return err
		}
		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
			return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
		}

		if len(data) > val.Len() {
			return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type())
		}

		for idx, elem := range data {
			val.Index(idx).Set(reflect.ValueOf(elem))
		}
		return nil
	case bsontype.Null:
		val.Set(reflect.Zero(val.Type()))
		return vr.ReadNull()
	case bsontype.Undefined:
		val.Set(reflect.Zero(val.Type()))
		return vr.ReadUndefined()
	default:
		return fmt.Errorf("cannot decode %v into an array", vrType)
	}

	// Choose the element-reading strategy: decodeD for primitive.E elements,
	// decodeDefault otherwise.
	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
	switch val.Type().Elem() {
	case tE:
		elemsFunc = dvd.decodeD
	default:
		elemsFunc = dvd.decodeDefault
	}

	elems, err := elemsFunc(dc, vr, val)
	if err != nil {
		return err
	}

	// Arrays have fixed length; refuse to overflow the destination.
	if len(elems) > val.Len() {
		return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems))
	}

	for idx, elem := range elems {
		val.Index(idx).Set(elem)
	}

	return nil
}

// SliceDecodeValue is the ValueDecoderFunc for slice types.
//
// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead.
func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Kind() != reflect.Slice {
		return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
	}

	switch vr.Type() {
	case bsontype.Array:
	case bsontype.Null:
		// Null decodes to a nil slice.
		val.Set(reflect.Zero(val.Type()))
		return vr.ReadNull()
	case bsontype.Type(0), bsontype.EmbeddedDocument:
		// A document can only decode into a []primitive.E slice.
		if val.Type().Elem() != tE {
			return fmt.Errorf("cannot decode document into %s", val.Type())
		}
	default:
		return fmt.Errorf("cannot decode %v into a slice", vr.Type())
	}

	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
	switch val.Type().Elem() {
	case tE:
		dc.Ancestor = val.Type()
		elemsFunc = dvd.decodeD
	default:
		elemsFunc = dvd.decodeDefault
	}

	elems, err := elemsFunc(dc, vr, val)
	if err != nil {
		return err
	}

	if val.IsNil() {
		val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
	}

	// Reset and refill the existing slice, reusing its backing array when possible.
	val.SetLen(0)
	val.Set(reflect.Append(val, elems...))

	return nil
}

// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations.
-func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - if !val.Type().Implements(tValueUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) - } - return nil -} - -// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. 
-func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - // If the target Go value is a pointer and the BSON field value is empty, set the value to the - // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to - // change the pointer value from within the function (only the value at the pointer address), - // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON - // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches - // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and - // the JSON field value is "null". - if val.Kind() == reflect.Ptr && len(src) == 0 { - val.Set(reflect.Zero(val.Type())) - return nil - } - - if !val.Type().Implements(tUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) - } - return nil -} - -// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. 
-// -// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - rtype, err := dc.LookupTypeMapEntry(vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.EmbeddedDocument: - if dc.Ancestor != nil { - rtype = dc.Ancestor - break - } - rtype = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return err - } - - elem := reflect.New(rtype).Elem() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. 
-func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreDocument { - return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - - cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) - val.Set(reflect.ValueOf(cdoc)) - return err -} - -func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { - elems := make([]reflect.Value, 0) - - ar, err := vr.ReadArray() - if err != nil { - return nil, err - } - - eType := val.Type().Elem() - - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return nil, err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - idx := 0 - for { - vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { - break - } - if err != nil { - return nil, err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return nil, newDecodeError(strconv.Itoa(idx), err) - } - elems = append(elems, elem) - idx++ - } - - return elems, nil -} - -func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { - var cws primitive.CodeWithScope - - code, dr, err := vr.ReadCodeWithScope() - if err != nil { - return cws, err - } - - scope := reflect.New(tD).Elem() - elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) - if err != nil { - return cws, err - } - - scope.Set(reflect.MakeSlice(tD, 0, len(elems))) - scope.Set(reflect.Append(scope, elems...)) - - cws = primitive.CodeWithScope{ - Code: primitive.JavaScript(code), - Scope: scope.Interface().(primitive.D), - } - return cws, nil -} - -func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t 
reflect.Type) (reflect.Value, error) { - if t != tCodeWithScope { - return emptyValue, ValueDecoderError{ - Name: "CodeWithScopeDecodeValue", - Types: []reflect.Type{tCodeWithScope}, - Received: reflect.Zero(t), - } - } - - var cws primitive.CodeWithScope - var err error - switch vrType := vr.Type(); vrType { - case bsontype.CodeWithScope: - cws, err = dvd.readCodeWithScope(dc, vr) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(cws), nil -} - -// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. -func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCodeWithScope { - return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - default: - return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return nil, err - } - - return dvd.decodeElemsFromDocumentReader(dc, dr) -} - -func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return nil, err - } - - elems := make([]reflect.Value, 0) - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return nil, err - } - - val := 
reflect.New(tEmpty).Elem() - err = decoder.DecodeValue(dc, vr, val) - if err != nil { - return nil, newDecodeError(key, err) - } - - elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) - } - - return elems, nil -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go deleted file mode 100644 index 6bdb43c..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ /dev/null @@ -1,766 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var defaultValueEncoders DefaultValueEncoders - -var bvwPool = bsonrw.NewBSONValueWriterPool() - -var errInvalidValue = errors.New("cannot encode invalid element") - -var sliceWriterPool = sync.Pool{ - New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0) - return &sw - }, -} - -func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { - vw, err := dw.WriteDocumentElement(e.Key) - if err != nil { - return err - } - - if e.Value == nil { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) - if err != nil { - return err - } - return nil -} - -// DefaultValueEncoders is a 
namespace type for the default ValueEncoders used -// when creating a registry. -type DefaultValueEncoders struct{} - -// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with -// the provided RegistryBuilder. -func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) - } - rb. - RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeEncoder(tTime, defaultTimeCodec). - RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeEncoder(tCoreArray, defaultArrayCodec). - RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). - RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). - RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). - RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). - RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). - RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). - RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). - RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). - RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). - RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). - RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). - RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). - RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). - RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). - RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). - RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). - RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). 
- RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). - RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). - RegisterDefaultEncoder(reflect.Map, defaultMapCodec). - RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultEncoder(reflect.String, defaultStringCodec). - RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). - RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). - RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). - RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) -} - -// BooleanEncodeValue is the ValueEncoderFunc for bool types. 
-func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Bool { - return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - return vw.WriteBoolean(val.Bool()) -} - -func fitsIn32Bits(i int64) bool { - return math.MinInt32 <= i && i <= math.MaxInt32 -} - -// IntEncodeValue is the ValueEncoderFunc for int types. -func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32: - return vw.WriteInt32(int32(val.Int())) - case reflect.Int: - i64 := val.Int() - if fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - case reflect.Int64: - i64 := val.Int() - if ec.MinSize && fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - } - - return ValueEncoderError{ - Name: "IntEncodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } -} - -// UintEncodeValue is the ValueEncoderFunc for uint types. -// -// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - if ec.MinSize && u64 <= math.MaxInt32 { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -// FloatEncodeValue is the ValueEncoderFunc for float types. -func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Float32, reflect.Float64: - return vw.WriteDouble(val.Float()) - } - - return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} -} - -// StringEncodeValue is the ValueEncoderFunc for string types. -// -// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. 
-func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tOID { - return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} - } - return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) -} - -// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDecimal { - return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) -} - -// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. -func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJSONNumber { - return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - jsnum := val.Interface().(json.Number) - - // Attempt int first, then float64 - if i64, err := jsnum.Int64(); err == nil { - return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) - } - - f64, err := jsnum.Float64() - if err != nil { - return err - } - - return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64)) -} - -// URLEncodeValue is the ValueEncoderFunc for url.URL. -func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tURL { - return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} - } - u := val.Interface().(url.URL) - return vw.WriteString(u.String()) -} - -// TimeEncodeValue is the ValueEncoderFunc for time.TIme. -// -// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} - -// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. -// -// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -// MapEncodeValue is the ValueEncoderFunc for map[string]* types. -// -// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead. -func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() { - // If we have a nill map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. 
- err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return dve.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. -func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - if collisionFn != nil && collisionFn(key.String()) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(key.String()) - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// ArrayEncodeValue is the ValueEncoderFunc for array types. -func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type().Elem() == tE { - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - e := val.Index(idx).Interface().(primitive.E) - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// SliceEncodeValue is the ValueEncoderFunc for slice types. -// -// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { - if origEncoder != nil || (currVal.Kind() != reflect.Interface) { - return origEncoder, currVal, nil - } - currVal = currVal.Elem() - if !currVal.IsValid() { - return nil, currVal, errInvalidValue - } - currEncoder, err := ec.LookupEncoder(currVal.Type()) - - return currEncoder, currVal, err -} - -// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement ValueMarshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - case val.Type().Implements(tValueMarshaler): - // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tValueMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - } - - fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") - returns := fn.Call(nil) - if !returns[2].IsNil() { - return returns[2].Interface().(error) - } - t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) - return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) -} - -// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. 
-func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Marshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - case val.Type().Implements(tMarshaler): - // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - } - - fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) - } - data := returns[0].Interface().([]byte) - return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) -} - -// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. 
-func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Proxy - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - case val.Type().Implements(tProxy): - // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tProxy) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - } - - fn := val.Convert(tProxy).MethodByName("ProxyBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) - } - data := returns[0] - var encoder ValueEncoder - var err error - if data.Elem().IsValid() { - encoder, err = ec.LookupEncoder(data.Elem().Type()) - } else { - encoder, err = ec.LookupEncoder(nil) - } - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, data.Elem()) -} - -// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJavaScript { - return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - return vw.WriteJavascript(val.String()) -} - -// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. 
-func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tSymbol { - return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - return vw.WriteSymbol(val.String()) -} - -// BinaryEncodeValue is the ValueEncoderFunc for Binary. -func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tBinary { - return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - b := val.Interface().(primitive.Binary) - - return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) -} - -// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tUndefined { - return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - return vw.WriteUndefined() -} - -// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDateTime { - return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - return vw.WriteDateTime(val.Int()) -} - -// NullEncodeValue is the ValueEncoderFunc for Null. -func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tNull { - return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - return vw.WriteNull() -} - -// RegexEncodeValue is the ValueEncoderFunc for Regex. 
-func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tRegex { - return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - regex := val.Interface().(primitive.Regex) - - return vw.WriteRegex(regex.Pattern, regex.Options) -} - -// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDBPointer { - return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - dbp := val.Interface().(primitive.DBPointer) - - return vw.WriteDBPointer(dbp.DB, dbp.Pointer) -} - -// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTimestamp { - return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - ts := val.Interface().(primitive.Timestamp) - - return vw.WriteTimestamp(ts.T, ts.I) -} - -// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMinKey { - return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - return vw.WriteMinKey() -} - -// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. 
-func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMaxKey { - return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - return vw.WriteMaxKey() -} - -// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreDocument { - return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - cdoc := val.Interface().(bsoncore.Document) - - return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) -} - -// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. -func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCodeWithScope { - return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - cws := val.Interface().(primitive.CodeWithScope) - - dw, err := vw.WriteCodeWithScope(string(cws.Code)) - if err != nil { - return err - } - - sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) - defer sliceWriterPool.Put(sw) - *sw = (*sw)[:0] - - scopeVW := bvwPool.Get(sw) - defer bvwPool.Put(scopeVW) - - encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) - if err != nil { - return err - } - - err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) - if err != nil { - return err - } - return dw.WriteDocumentEnd() -} - -// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type -func isImplementationNil(val reflect.Value, inter reflect.Type) bool { - vt := val.Type() - for vt.Kind() 
== reflect.Ptr { - vt = vt.Elem() - } - return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go deleted file mode 100644 index 5f903eb..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsoncodec provides a system for encoding values to BSON representations and decoding -// values from BSON representations. This package considers both binary BSON and ExtendedJSON as -// BSON representations. The types in this package enable a flexible system for handling this -// encoding and decoding. -// -// The codec system is composed of two parts: -// -// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON -// representations. -// -// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for -// retrieving them. -// -// # ValueEncoders and ValueDecoders -// -// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. -// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the -// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc -// is provided to allow use of a function with the correct signature as a ValueEncoder. An -// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and -// to provide configuration information. 
-// -// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that -// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to -// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext -// instance is provided and serves similar functionality to the EncodeContext. -// -// # Registry and RegistryBuilder -// -// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type -// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a -// RegistryBuilder, which handles three main types of codecs: -// -// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods. -// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly. -// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the -// interface, but not for values with concrete types that implement the interface. -// -// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods. -// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values -// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will -// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete -// type. -// -// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when -// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64 -// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. 
The following code would -// change the behavior so these values decode as Go int instances instead: -// -// intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) -// -// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder -// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the -// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. -// These methods should be used to change the behavior for all values for a specific kind. -// -// # Registry Lookup Procedure -// -// When looking up an encoder in a Registry, the precedence rules are as follows: -// -// 1. A type encoder registered for the exact type of the value. -// -// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the -// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first -// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined -// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those -// will take precedence over any new hooks. -// -// 3. A kind encoder registered for the value's kind. -// -// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence -// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is -// found. 
-// -// # DefaultValueEncoders and DefaultValueDecoders -// -// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and -// ValueDecoders for handling a wide range of Go types, including all of the types within the -// primitive package. To make registering these codecs easier, a helper method on each type is -// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for -// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also -// handles registering type map entries for each BSON type. -package bsoncodec diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go deleted file mode 100644 index eda417c..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// EmptyInterfaceCodec is the Codec used for interface{} values. -type EmptyInterfaceCodec struct { - DecodeBinaryAsSlice bool -} - -var ( - defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() - - _ ValueCodec = defaultEmptyInterfaceCodec - _ typeDecoder = defaultEmptyInterfaceCodec -) - -// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. 
-func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { - interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) - - codec := EmptyInterfaceCodec{} - if interfaceOpt.DecodeBinaryAsSlice != nil { - codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice - } - return &codec -} - -// EncodeValue is the ValueEncoderFunc for interface{}. -func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { - isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument { - if dc.defaultDocumentType != nil { - // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return - // that type. - return dc.defaultDocumentType, nil - } - if dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil - } - } - - rtype, err := dc.LookupTypeMapEntry(valueType) - if err == nil { - return rtype, nil - } - - if isDocument { - // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, - // depending on the original valueType. 
- var lookupType bsontype.Type - switch valueType { - case bsontype.Type(0): - lookupType = bsontype.EmbeddedDocument - case bsontype.EmbeddedDocument: - lookupType = bsontype.Type(0) - } - - rtype, err = dc.LookupTypeMapEntry(lookupType) - if err == nil { - return rtype, nil - } - } - - return nil, err -} - -func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tEmpty { - return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} - } - - rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.Null: - return reflect.Zero(t), vr.ReadNull() - default: - return emptyValue, err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return emptyValue, err - } - - elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) - if err != nil { - return emptyValue, err - } - - if eic.DecodeBinaryAsSlice && rtype == tBinary { - binElem := elem.Interface().(primitive.Binary) - if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { - elem = reflect.ValueOf(binElem.Data) - } - } - - return elem, nil -} - -// DecodeValue is the ValueDecoderFunc for interface{}. 
-func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - elem, err := eic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go deleted file mode 100644 index e1fbef9..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding" - "fmt" - "reflect" - "strconv" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var defaultMapCodec = NewMapCodec() - -// MapCodec is the Codec used for map values. -type MapCodec struct { - DecodeZerosMap bool - EncodeNilAsEmpty bool - EncodeKeysWithStringer bool -} - -var _ ValueCodec = &MapCodec{} - -// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. -// This applies to types used as map keys and is similar to encoding.TextMarshaler. -type KeyMarshaler interface { - MarshalKey() (key string, err error) -} - -// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation -// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. 
-// -// UnmarshalKey must be able to decode the form generated by MarshalKey. -// UnmarshalKey must copy the text if it wishes to retain the text -// after returning. -type KeyUnmarshaler interface { - UnmarshalKey(key string) error -} - -// NewMapCodec returns a MapCodec with options opts. -func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { - mapOpt := bsonoptions.MergeMapCodecOptions(opts...) - - codec := MapCodec{} - if mapOpt.DecodeZerosMap != nil { - codec.DecodeZerosMap = *mapOpt.DecodeZerosMap - } - if mapOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty - } - if mapOpt.EncodeKeysWithStringer != nil { - codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer - } - return &codec -} - -// EncodeValue is the ValueEncoder for map[*]* types. -func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() && !mc.EncodeNilAsEmpty { - // If we have a nil map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. - err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return mc.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. 
-func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - keyStr, err := mc.encodeKey(key) - if err != nil { - return err - } - - if collisionFn != nil && collisionFn(keyStr) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(keyStr) - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// DecodeValue is the ValueDecoder for map[string/decimal]* types. 
-func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - if val.Len() > 0 && mc.DecodeZerosMap { - clearMap(val) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - k, err := mc.decodeKey(key, keyType) - if err != nil { - return err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return newDecodeError(key, err) - } - - val.SetMapIndex(k, elem) - } - return nil -} - -func clearMap(m reflect.Value) { - var none reflect.Value - for _, k := range m.MapKeys() { - m.SetMapIndex(k, none) - } -} - -func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { - if mc.EncodeKeysWithStringer { - return fmt.Sprint(val), nil - } - - // keys of any string type are used directly - if val.Kind() == reflect.String { - return val.String(), nil - } - // KeyMarshalers are marshaled - if km, ok := val.Interface().(KeyMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - buf, err := 
km.MarshalKey() - if err == nil { - return buf, nil - } - return "", err - } - // keys implement encoding.TextMarshaler are marshaled. - if km, ok := val.Interface().(encoding.TextMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - - buf, err := km.MarshalText() - if err != nil { - return "", err - } - - return string(buf), nil - } - - switch val.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(val.Int(), 10), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(val.Uint(), 10), nil - } - return "", fmt.Errorf("unsupported key type: %v", val.Type()) -} - -var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - -func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { - keyVal := reflect.ValueOf(key) - var err error - switch { - // First, if EncodeKeysWithStringer is not enabled, try to decode withKeyUnmarshaler - case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(KeyUnmarshaler) - err = v.UnmarshalKey(key) - keyVal = keyVal.Elem() - // Try to decode encoding.TextUnmarshalers. 
- case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(encoding.TextUnmarshaler) - err = v.UnmarshalText([]byte(key)) - keyVal = keyVal.Elem() - // Otherwise, go to type specific behavior - default: - switch keyType.Kind() { - case reflect.String: - keyVal = reflect.ValueOf(key).Convert(keyType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, parseErr := strconv.ParseInt(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, parseErr := strconv.ParseUint(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - break - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Float32, reflect.Float64: - if mc.EncodeKeysWithStringer { - parsed, err := strconv.ParseFloat(key, 64) - if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) - } - keyVal = reflect.ValueOf(parsed) - break - } - fallthrough - default: - return keyVal, fmt.Errorf("unsupported key type: %v", keyType) - } - } - return keyVal, err -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go deleted file mode 100644 index fbd9f0a..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import "fmt" - -type mode int - -const ( - _ mode = iota - mTopLevel - mDocument - mArray - mValue - mElement - mCodeWithScope - mSpacer -) - -func (m mode) String() string { - var str string - - switch m { - case mTopLevel: - str = "TopLevel" - case mDocument: - str = "DocumentMode" - case mArray: - str = "ArrayMode" - case mValue: - str = "ValueMode" - case mElement: - str = "ElementMode" - case mCodeWithScope: - str = "CodeWithScopeMode" - case mSpacer: - str = "CodeWithScopeSpacerFrame" - default: - str = "UnknownMode" - } - - return str -} - -// TransitionError is an error returned when an invalid progressing a -// ValueReader or ValueWriter state machine occurs. -type TransitionError struct { - parent mode - current mode - destination mode -} - -func (te TransitionError) Error() string { - if te.destination == mode(0) { - return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current) - } - if te.parent == mode(0) { - return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination) - } - return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent) -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go deleted file mode 100644 index 616a3e7..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var _ ValueEncoder = &PointerCodec{} -var _ ValueDecoder = &PointerCodec{} - -// PointerCodec is the Codec used for pointers. -type PointerCodec struct { - ecache map[reflect.Type]ValueEncoder - dcache map[reflect.Type]ValueDecoder - l sync.RWMutex -} - -// NewPointerCodec returns a PointerCodec that has been initialized. -func NewPointerCodec() *PointerCodec { - return &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), - } -} - -// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil -// or looking up an encoder for the type of value the pointer points to. -func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.Ptr { - if !val.IsValid() { - return vw.WriteNull() - } - return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - pc.l.RLock() - enc, ok := pc.ecache[val.Type()] - pc.l.RUnlock() - if ok { - if enc == nil { - return ErrNoEncoder{Type: val.Type()} - } - return enc.EncodeValue(ec, vw, val.Elem()) - } - - enc, err := ec.LookupEncoder(val.Type().Elem()) - pc.l.Lock() - pc.ecache[val.Type()] = enc - pc.l.Unlock() - if err != nil { - return err - } - - return enc.EncodeValue(ec, vw, val.Elem()) -} - -// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and -// using that to decode. If the BSON value is Null, this method will set the pointer to nil. 
-func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Ptr { - return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - } - - if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) - } - - pc.l.RLock() - dec, ok := pc.dcache[val.Type()] - pc.l.RUnlock() - if ok { - if dec == nil { - return ErrNoDecoder{Type: val.Type()} - } - return dec.DecodeValue(dc, vr, val.Elem()) - } - - dec, err := dc.LookupDecoder(val.Type().Elem()) - pc.l.Lock() - pc.dcache[val.Type()] = dec - pc.l.Unlock() - if err != nil { - return err - } - - return dec.DecodeValue(dc, vr, val.Elem()) -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go deleted file mode 100644 index 4cf2b01..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types -// that implement this interface with have ProxyBSON called during the encoding process and that -// value will be encoded in place for the implementer. 
-type Proxy interface { - ProxyBSON() (interface{}, error) -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go deleted file mode 100644 index 8064402..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. -var ErrNilType = errors.New("cannot perform a decoder lookup on ") - -// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. -var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") - -// ErrNoEncoder is returned when there wasn't an encoder available for a type. -type ErrNoEncoder struct { - Type reflect.Type -} - -func (ene ErrNoEncoder) Error() string { - if ene.Type == nil { - return "no encoder found for " - } - return "no encoder found for " + ene.Type.String() -} - -// ErrNoDecoder is returned when there wasn't a decoder available for a type. -type ErrNoDecoder struct { - Type reflect.Type -} - -func (end ErrNoDecoder) Error() string { - return "no decoder found for " + end.Type.String() -} - -// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. 
-type ErrNoTypeMapEntry struct { - Type bsontype.Type -} - -func (entme ErrNoTypeMapEntry) Error() string { - return "no type map entry found for " + entme.Type.String() -} - -// ErrNotInterface is returned when the provided type is not an interface. -var ErrNotInterface = errors.New("The provided type is not an interface") - -// A RegistryBuilder is used to build a Registry. This type is not goroutine -// safe. -type RegistryBuilder struct { - typeEncoders map[reflect.Type]ValueEncoder - interfaceEncoders []interfaceValueEncoder - kindEncoders map[reflect.Kind]ValueEncoder - - typeDecoders map[reflect.Type]ValueDecoder - interfaceDecoders []interfaceValueDecoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// typed passed around and Encoders and Decoders are constructed from it. -type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex -} - -// NewRegistryBuilder creates a new empty RegistryBuilder. 
-func NewRegistryBuilder() *RegistryBuilder { - return &RegistryBuilder{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), - } -} - -func buildDefaultRegistry() *Registry { - rb := NewRegistryBuilder() - defaultValueEncoders.RegisterDefaultEncoders(rb) - defaultValueDecoders.RegisterDefaultDecoders(rb) - return rb.Build() -} - -// RegisterCodec will register the provided ValueCodec for the provided type. -func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { - rb.RegisterTypeEncoder(t, codec) - rb.RegisterTypeDecoder(t, codec) - return rb -} - -// RegisterTypeEncoder will register the provided ValueEncoder for the provided type. -// -// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It -// will not be called when marshalling a non-interface type that implements the interface. -func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.typeEncoders[t] = enc - return rb -} - -// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. 
-func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, encoder := range rb.interfaceEncoders { - if encoder.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) - return rb -} - -// RegisterTypeDecoder will register the provided ValueDecoder for the provided type. -// -// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface. -// It will not be called when unmarshalling into a non-interface type that implements the interface. -func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.typeDecoders[t] = dec - return rb -} - -// RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when -// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. 
-func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, decoder := range rb.interfaceDecoders { - if decoder.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) - return rb -} - -// RegisterEncoder registers the provided type and encoder pair. -// -// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. -func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t == tEmpty { - rb.typeEncoders[t] = enc - return rb - } - switch t.Kind() { - case reflect.Interface: - for idx, ir := range rb.interfaceEncoders { - if ir.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) - default: - rb.typeEncoders[t] = enc - } - return rb -} - -// RegisterDecoder registers the provided type and decoder pair. -// -// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. -func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t == nil { - rb.typeDecoders[nil] = dec - return rb - } - if t == tEmpty { - rb.typeDecoders[t] = dec - return rb - } - switch t.Kind() { - case reflect.Interface: - for idx, ir := range rb.interfaceDecoders { - if ir.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) - default: - rb.typeDecoders[t] = dec - } - return rb -} - -// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided -// kind. 
-func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.kindEncoders[kind] = enc - return rb -} - -// RegisterDefaultDecoder will register the provided ValueDecoder to the -// provided kind. -func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.kindDecoders[kind] = dec - return rb -} - -// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this -// mapping is decoding situations where an empty interface is used and a default type needs to be -// created and decoded into. -// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.typeMap[bt] = rt - return rb -} - -// Build creates a Registry from the current state of this RegistryBuilder. 
-func (rb *RegistryBuilder) Build() *Registry { - registry := new(Registry) - - registry.typeEncoders = make(map[reflect.Type]ValueEncoder) - for t, enc := range rb.typeEncoders { - registry.typeEncoders[t] = enc - } - - registry.typeDecoders = make(map[reflect.Type]ValueDecoder) - for t, dec := range rb.typeDecoders { - registry.typeDecoders[t] = dec - } - - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.interfaceEncoders) - - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.interfaceDecoders) - - registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.kindEncoders { - registry.kindEncoders[kind] = enc - } - - registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.kindDecoders { - registry.kindDecoders[kind] = dec - } - - registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.typeMap { - registry.typeMap[bt] = rt - } - - return registry -} - -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: -// -// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using -// RegisterTypeEncoder for the interface will be selected. -// -// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the -// type. -// -// 3. An encoder registered for the reflect.Kind of the value. -// -// If no encoder is found, an error of type ErrNoEncoder is returned. 
-func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { - encodererr := ErrNoEncoder{Type: t} - r.mu.RLock() - enc, found := r.lookupTypeEncoder(t) - r.mu.RUnlock() - if found { - if enc == nil { - return nil, ErrNoEncoder{Type: t} - } - return enc, nil - } - - enc, found = r.lookupInterfaceEncoder(t, true) - if found { - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil - } - - if t == nil { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr - } - - enc, found = r.kindEncoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr - } - - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil -} - -func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[t] - return enc, found -} - -func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if t == nil { - return nil, false - } - for _, ienc := range r.interfaceEncoders { - if t.Implements(ienc.i) { - return ienc.ve, true - } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(t, false) - if !found { - defaultEnc = r.kindEncoders[t.Kind()] - } - return newCondAddrEncoder(ienc.ve, defaultEnc), true - } - } - return nil, false -} - -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: -// -// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using -// RegisterTypeDecoder for the interface will be selected. -// -// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the -// type. -// -// 3. 
A decoder registered for the reflect.Kind of the value. -// -// If no decoder is found, an error of type ErrNoDecoder is returned. -func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { - if t == nil { - return nil, ErrNilType - } - decodererr := ErrNoDecoder{Type: t} - r.mu.RLock() - dec, found := r.lookupTypeDecoder(t) - r.mu.RUnlock() - if found { - if dec == nil { - return nil, ErrNoDecoder{Type: t} - } - return dec, nil - } - - dec, found = r.lookupInterfaceDecoder(t, true) - if found { - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil - } - - dec, found = r.kindDecoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeDecoders[t] = nil - r.mu.Unlock() - return nil, decodererr - } - - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil -} - -func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[t] - return dec, found -} - -func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { - for _, idec := range r.interfaceDecoders { - if t.Implements(idec.i) { - return idec.vd, true - } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(t, false) - if !found { - defaultDec = r.kindDecoders[t.Kind()] - } - return newCondAddrDecoder(idec.vd, defaultDec), true - } - } - return nil, false -} - -// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON -// type. If no type is found, ErrNoTypeMapEntry is returned. 
-func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - t, ok := r.typeMap[bt] - if !ok || t == nil { - return nil, ErrNoTypeMapEntry{Type: bt} - } - return t, nil -} - -type interfaceValueEncoder struct { - i reflect.Type - ve ValueEncoder -} - -type interfaceValueDecoder struct { - i reflect.Type - vd ValueDecoder -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go deleted file mode 100644 index 3c1b6b8..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var defaultSliceCodec = NewSliceCodec() - -// SliceCodec is the Codec used for slice values. -type SliceCodec struct { - EncodeNilAsEmpty bool -} - -var _ ValueCodec = &MapCodec{} - -// NewSliceCodec returns a MapCodec with options opts. -func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { - sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) - - codec := SliceCodec{} - if sliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for slice types. 
-func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() && !sc.EncodeNilAsEmpty { - return vw.WriteNull() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// DecodeValue is the ValueDecoder for slice types. 
-func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) - } - - val.SetLen(0) - for _, elem := range data { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } - return nil - case bsontype.String: - if sliceType := val.Type().Elem(); sliceType != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) - } - str, err := vr.ReadString() - if err != nil { - return err - } - byteStr := []byte(str) - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) - } - - val.SetLen(0) - for _, elem := range byteStr { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } - return nil - default: - return fmt.Errorf("cannot decode %v into a slice", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: 
- dc.Ancestor = val.Type() - elemsFunc = defaultValueDecoders.decodeD - default: - elemsFunc = defaultValueDecoders.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go deleted file mode 100644 index 5332b7c..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// StringCodec is the Codec used for struct values. -type StringCodec struct { - DecodeObjectIDAsHex bool -} - -var ( - defaultStringCodec = NewStringCodec() - - _ ValueCodec = defaultStringCodec - _ typeDecoder = defaultStringCodec -) - -// NewStringCodec returns a StringCodec with options opts. -func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { - stringOpt := bsonoptions.MergeStringCodecOptions(opts...) - return &StringCodec{*stringOpt.DecodeObjectIDAsHex} -} - -// EncodeValue is the ValueEncoder for string types. 
-func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.String { - return emptyValue, ValueDecoderError{ - Name: "StringDecodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: reflect.Zero(t), - } - } - - var str string - var err error - switch vr.Type() { - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - if sc.DecodeObjectIDAsHex { - str = oid.Hex() - } else { - byteArray := [12]byte(oid) - str = string(byteArray[:]) - } - case bsontype.Symbol: - str, err = vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} - } - str = string(data) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - - return reflect.ValueOf(str), nil -} - -// DecodeValue is the ValueDecoder for string types. 
-func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - elem, err := sc.decodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go deleted file mode 100644 index be3f208..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ /dev/null @@ -1,664 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type. -type DecodeError struct { - keys []string - wrapped error -} - -// Unwrap returns the underlying error -func (de *DecodeError) Unwrap() error { - return de.wrapped -} - -// Error implements the error interface. -func (de *DecodeError) Error() string { - // The keys are stored in reverse order because the de.keys slice is builtup while propagating the error up the - // stack of BSON keys, so we call de.Keys(), which reverses them. 
- keyPath := strings.Join(de.Keys(), ".") - return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped) -} - -// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down -// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be -// a string, the keys slice will be ["a", "b", "c"]. -func (de *DecodeError) Keys() []string { - reversedKeys := make([]string, 0, len(de.keys)) - for idx := len(de.keys) - 1; idx >= 0; idx-- { - reversedKeys = append(reversedKeys, de.keys[idx]) - } - - return reversedKeys -} - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// StructCodec is the Codec used for struct values. -type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex - parser StructTagParser - DecodeZeroStruct bool - DecodeDeepZeroInline bool - EncodeOmitDefaultStruct bool - AllowUnexportedFields bool - OverwriteDuplicatedInlinedFields bool -} - -var _ ValueEncoder = &StructCodec{} -var _ ValueDecoder = &StructCodec{} - -// NewStructCodec returns a StructCodec that uses p for struct tag parsing. -func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { - if p == nil { - return nil, errors.New("a StructTagParser must be provided to NewStructCodec") - } - - structOpt := bsonoptions.MergeStructCodecOptions(opts...) 
- - codec := &StructCodec{ - cache: make(map[reflect.Type]*structDescription), - parser: p, - } - - if structOpt.DecodeZeroStruct != nil { - codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct - } - if structOpt.DecodeDeepZeroInline != nil { - codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline - } - if structOpt.EncodeOmitDefaultStruct != nil { - codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct - } - if structOpt.OverwriteDuplicatedInlinedFields != nil { - codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields - } - if structOpt.AllowUnexportedFields != nil { - codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields - } - - return codec, nil -} - -// EncodeValue handles encoding generic struct types. -func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Struct { - return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - sd, err := sc.describeStruct(r.Registry, val.Type()) - if err != nil { - return err - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - var rv reflect.Value - for _, desc := range sd.fl { - if desc.inline == nil { - rv = val.Field(desc.idx) - } else { - rv, err = fieldByIndexErr(val, desc.inline) - if err != nil { - continue - } - } - - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) - - if err != nil && err != errInvalidValue { - return err - } - - if err == errInvalidValue { - if desc.omitEmpty { - continue - } - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - err = vw2.WriteNull() - if err != nil { - return err - } - continue - } - - if desc.encoder == nil { - return ErrNoEncoder{Type: rv.Type()} - } - - encoder := desc.encoder - - var isZero bool - rvInterface := rv.Interface() - if cz, ok := encoder.(CodecZeroer); ok { - isZero = 
cz.IsTypeZero(rvInterface) - } else if rv.Kind() == reflect.Interface { - // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. - isZero = rv.IsNil() - } else { - isZero = sc.isZero(rvInterface) - } - if desc.omitEmpty && isZero { - continue - } - - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - - ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} - err = encoder.EncodeValue(ectx, vw2, rv) - if err != nil { - return err - } - } - - if sd.inlineMap >= 0 { - rv := val.Field(sd.inlineMap) - collisionFn := func(key string) bool { - _, exists := sd.fm[key] - return exists - } - - return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) - } - - return dw.WriteDocumentEnd() -} - -func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { - return &DecodeError{ - keys: []string{key}, - wrapped: original, - } - } - - de.keys = append(de.keys, key) - return de -} - -// DecodeValue implements the Codec interface. -// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. -// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
-func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Struct { - return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - sd, err := sc.describeStruct(r.Registry, val.Type()) - if err != nil { - return err - } - - if sc.DecodeZeroStruct { - val.Set(reflect.Zero(val.Type())) - } - if sc.DecodeDeepZeroInline && sd.inline { - val.Set(deepZero(val.Type())) - } - - var decoder ValueDecoder - var inlineMap reflect.Value - if sd.inlineMap >= 0 { - inlineMap = val.Field(sd.inlineMap) - decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) - if err != nil { - return err - } - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - for { - name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - fd, exists := sd.fm[name] - if !exists { - // if the original name isn't found in the struct description, try again with the name in lowercase - // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field - // names - fd, exists = sd.fm[strings.ToLower(name)] - } - - if !exists { - if sd.inlineMap < 0 { - // The encoding/json package requires a flag to return on error for non-existent fields. - // This functionality seems appropriate for the struct codec. 
- err = vr.Skip() - if err != nil { - return err - } - continue - } - - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - - elem := reflect.New(inlineMap.Type().Elem()).Elem() - r.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(r, vr, elem) - if err != nil { - return err - } - inlineMap.SetMapIndex(reflect.ValueOf(name), elem) - continue - } - - var field reflect.Value - if fd.inline == nil { - field = val.Field(fd.idx) - } else { - field, err = getInlineField(val, fd.inline) - if err != nil { - return err - } - } - - if !field.CanSet() { // Being settable is a super set of being addressable. - innerErr := fmt.Errorf("field %v is not settable", field) - return newDecodeError(fd.name, innerErr) - } - if field.Kind() == reflect.Ptr && field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - } - field = field.Addr() - - dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate} - if fd.decoder == nil { - return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) - } - - err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - } - - return nil -} - -func (sc *StructCodec) isZero(i interface{}) bool { - v := reflect.ValueOf(i) - - // check the value validity - if !v.IsValid() { - return true - } - - if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { - return z.IsZero() - } - - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Struct: - if 
sc.EncodeOmitDefaultStruct { - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() - } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - fld := v.Field(i) - if !sc.isZero(fld.Interface()) { - return false - } - } - return true - } - } - - return false -} - -type structDescription struct { - fm map[string]fieldDescription - fl []fieldDescription - inlineMap int - inline bool -} - -type fieldDescription struct { - name string // BSON key name - fieldName string // struct field name - idx int - omitEmpty bool - minSize bool - truncate bool - inline []int - encoder ValueEncoder - decoder ValueDecoder -} - -type byIndex []fieldDescription - -func (bi byIndex) Len() int { return len(bi) } - -func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } - -func (bi byIndex) Less(i, j int) bool { - // If a field is inlined, its index in the top level struct is stored at inline[0] - iIdx, jIdx := bi[i].idx, bi[j].idx - if len(bi[i].inline) > 0 { - iIdx = bi[i].inline[0] - } - if len(bi[j].inline) > 0 { - jIdx = bi[j].inline[0] - } - if iIdx != jIdx { - return iIdx < jIdx - } - for k, biik := range bi[i].inline { - if k >= len(bi[j].inline) { - return false - } - if biik != bi[j].inline[k] { - return biik < bi[j].inline[k] - } - } - return len(bi[i].inline) < len(bi[j].inline) -} - -func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { - // We need to analyze the struct, including getting the tags, collecting - // information about inlining, and create a map of the field name to the field. 
- sc.l.RLock() - ds, exists := sc.cache[t] - sc.l.RUnlock() - if exists { - return ds, nil - } - - numFields := t.NumField() - sd := &structDescription{ - fm: make(map[string]fieldDescription, numFields), - fl: make([]fieldDescription, 0, numFields), - inlineMap: -1, - } - - var fields []fieldDescription - for i := 0; i < numFields; i++ { - sf := t.Field(i) - if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { - // field is private or unexported fields aren't allowed, ignore - continue - } - - sfType := sf.Type - encoder, err := r.LookupEncoder(sfType) - if err != nil { - encoder = nil - } - decoder, err := r.LookupDecoder(sfType) - if err != nil { - decoder = nil - } - - description := fieldDescription{ - fieldName: sf.Name, - idx: i, - encoder: encoder, - decoder: decoder, - } - - stags, err := sc.parser.ParseStructTags(sf) - if err != nil { - return nil, err - } - if stags.Skip { - continue - } - description.name = stags.Name - description.omitEmpty = stags.OmitEmpty - description.minSize = stags.MinSize - description.truncate = stags.Truncate - - if stags.Inline { - sd.inline = true - switch sfType.Kind() { - case reflect.Map: - if sd.inlineMap >= 0 { - return nil, errors.New("(struct " + t.String() + ") multiple inline maps") - } - if sfType.Key() != tString { - return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys") - } - sd.inlineMap = description.idx - case reflect.Ptr: - sfType = sfType.Elem() - if sfType.Kind() != reflect.Struct { - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - fallthrough - case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType) - if err != nil { - return nil, err - } - for _, fd := range inlinesf.fl { - if fd.inline == nil { - fd.inline = []int{i, fd.idx} - } else { - fd.inline = append([]int{i}, fd.inline...) 
- } - fields = append(fields, fd) - - } - default: - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - continue - } - fields = append(fields, description) - } - - // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name - sort.Slice(fields, func(i, j int) bool { - x := fields - // sort field by name, breaking ties with depth, then - // breaking ties with index sequence. - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].inline) != len(x[j].inline) { - return len(x[i].inline) < len(x[j].inline) - } - return byIndex(x).Less(i, j) - }) - - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - sd.fl = append(sd.fl, fi) - sd.fm[name] = fi - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields { - return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) - } - sd.fl = append(sd.fl, dominant) - sd.fm[name] = dominant - } - - sort.Sort(byIndex(sd.fl)) - - sc.l.Lock() - sc.cache[t] = sd - sc.l.Unlock() - - return sd, nil -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's inlining rules. If there are multiple top-level -// fields, the boolean will be false: This condition is an error in Go -// and we skip all the fields. -func dominantField(fields []fieldDescription) (fieldDescription, bool) { - // The fields are sorted in increasing index-length order, then by presence of tag. - // That means that the first field is the dominant one. 
We need only check - // for error cases: two fields at top level. - if len(fields) > 1 && - len(fields[0].inline) == len(fields[1].inline) { - return fieldDescription{}, false - } - return fields[0], true -} - -func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { - defer func() { - if recovered := recover(); recovered != nil { - switch r := recovered.(type) { - case string: - err = fmt.Errorf("%s", r) - case error: - err = r - } - } - }() - - result = v.FieldByIndex(index) - return -} - -func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { - field, err := fieldByIndexErr(val, index) - if err == nil { - return field, nil - } - - // if parent of this element doesn't exist, fix its parent - inlineParent := index[:len(index)-1] - var fParent reflect.Value - if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { - fParent, err = getInlineField(val, inlineParent) - if err != nil { - return fParent, err - } - } - fParent.Set(reflect.New(fParent.Type().Elem())) - - return fieldByIndexErr(val, index) -} - -// DeepZero returns recursive zero object -func deepZero(st reflect.Type) (result reflect.Value) { - result = reflect.Indirect(reflect.New(st)) - - if result.Kind() == reflect.Struct { - for i := 0; i < result.NumField(); i++ { - if f := result.Field(i); f.Kind() == reflect.Ptr { - if f.CanInterface() { - if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) - } - } - } - } - } - - return -} - -// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside -func recursivePointerTo(v reflect.Value) reflect.Value { - v = reflect.Indirect(v) - result := reflect.New(v.Type()) - if v.Kind() == reflect.Struct { - for i := 0; i < v.NumField(); i++ { - if f := v.Field(i); f.Kind() == reflect.Ptr { - if f.Elem().Kind() == reflect.Struct { - result.Elem().Field(i).Set(recursivePointerTo(f)) - } - } - } - } 
- - return result -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go deleted file mode 100644 index 62708c5..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "strings" -) - -// StructTagParser returns the struct tags for a given struct field. -type StructTagParser interface { - ParseStructTags(reflect.StructField) (StructTags, error) -} - -// StructTagParserFunc is an adapter that allows a generic function to be used -// as a StructTagParser. -type StructTagParserFunc func(reflect.StructField) (StructTags, error) - -// ParseStructTags implements the StructTagParser interface. -func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { - return stpf(sf) -} - -// StructTags represents the struct tag fields that the StructCodec uses during -// the encoding and decoding process. -// -// In the case of a struct, the lowercased field name is used as the key for each exported -// field but this behavior may be changed using a struct tag. The tag may also contain flags to -// adjust the marshalling behavior for the field. -// -// The properties are defined below: -// -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. -// -// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's -// feasible while preserving the numeric value. 
-// -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. -// -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. -// -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. -// -// TODO(skriptble): Add tags for undefined as nil and for null as nil. -type StructTags struct { - Name string - OmitEmpty bool - MinSize bool - Truncate bool - Inline bool - Skip bool -} - -// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. -// It will handle the bson struct tag. See the documentation for StructTags to see -// what each of the returned fields means. -// -// If there is no name in the struct tag fields, the struct field name is lowercased. -// The tag formats accepted are: -// -// "[][,[,]]" -// -// `(...) bson:"[][,[,]]" (...)` -// -// An example: -// -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } -// -// A struct tag either consisting entirely of '-' or with a bson key with a -// value consisting entirely of '-' will return a StructTags with Skip true and -// the remaining fields will be their default values. 
-var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - return parseTags(key, tag) -} - -func parseTags(key string, tag string) (StructTags, error) { - var st StructTags - if tag == "-" { - st.Skip = true - return st, nil - } - - for idx, str := range strings.Split(tag, ",") { - if idx == 0 && str != "" { - key = str - } - switch str { - case "omitempty": - st.OmitEmpty = true - case "minsize": - st.MinSize = true - case "truncate": - st.Truncate = true - case "inline": - st.Inline = true - } - } - - st.Name = key - - return st, nil -} - -// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser -// but will also fallback to parsing the json tag instead on a field where the -// bson tag isn't available. -var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok { - tag, ok = sf.Tag.Lookup("json") - } - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - - return parseTags(key, tag) -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go deleted file mode 100644 index ec7e30f..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -const ( - timeFormatString = "2006-01-02T15:04:05.999Z07:00" -) - -// TimeCodec is the Codec used for time.Time values. -type TimeCodec struct { - UseLocalTimeZone bool -} - -var ( - defaultTimeCodec = NewTimeCodec() - - _ ValueCodec = defaultTimeCodec - _ typeDecoder = defaultTimeCodec -) - -// NewTimeCodec returns a TimeCodec with options opts. -func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { - timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) - - codec := TimeCodec{} - if timeOpt.UseLocalTimeZone != nil { - codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone - } - return &codec -} - -func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tTime { - return emptyValue, ValueDecoderError{ - Name: "TimeDecodeValue", - Types: []reflect.Type{tTime}, - Received: reflect.Zero(t), - } - } - - var timeVal time.Time - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(dt/1000, dt%1000*1000000) - case bsontype.String: - // assume strings are in the isoTimeFormat - timeStr, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - timeVal, err = time.Parse(timeFormatString, timeStr) - if err != nil { - return emptyValue, err - } - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(i64/1000, i64%1000*1000000) - case bsontype.Timestamp: - t, _, err := vr.ReadTimestamp() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(int64(t), 0) - case 
bsontype.Null: - if err := vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) - } - - if !tc.UseLocalTimeZone { - timeVal = timeVal.UTC() - } - return reflect.ValueOf(timeVal), nil -} - -// DecodeValue is the ValueDecoderFunc for time.Time. -func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - elem, err := tc.decodeType(dc, vr, tTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// EncodeValue is the ValueEncoderFunc for time.TIme. -func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go deleted file mode 100644 index 07f4b70..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "net/url" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var tBool = reflect.TypeOf(false) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tString = reflect.TypeOf("") -var tTime = reflect.TypeOf(time.Time{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tByteSlice = reflect.TypeOf([]byte(nil)) -var tByte = reflect.TypeOf(byte(0x00)) -var tURL = reflect.TypeOf(url.URL{}) -var tJSONNumber = reflect.TypeOf(json.Number("")) - -var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() -var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() -var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() -var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) -var tD = reflect.TypeOf(primitive.D{}) -var tA = reflect.TypeOf(primitive.A{}) -var tE = reflect.TypeOf(primitive.E{}) - -var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) -var tCoreArray 
= reflect.TypeOf(bsoncore.Array{}) diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go deleted file mode 100644 index 0b21ce9..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "math" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// UIntCodec is the Codec used for uint values. -type UIntCodec struct { - EncodeToMinSize bool -} - -var ( - defaultUIntCodec = NewUIntCodec() - - _ ValueCodec = defaultUIntCodec - _ typeDecoder = defaultUIntCodec -) - -// NewUIntCodec returns a UIntCodec with options opts. -func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { - uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) - - codec := UIntCodec{} - if uintOpt.EncodeToMinSize != nil { - codec.EncodeToMinSize = *uintOpt.EncodeToMinSize - } - return &codec -} - -// EncodeValue is the ValueEncoder for uint types. 
-func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - - // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 - useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) - - if u64 <= math.MaxInt32 && useMinSize { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - 
case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return emptyValue, fmt.Errorf("%d overflows uint8", i64) - } - - return reflect.ValueOf(uint8(i64)), nil - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return emptyValue, fmt.Errorf("%d overflows uint16", i64) - } - - return reflect.ValueOf(uint16(i64)), nil - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return emptyValue, fmt.Errorf("%d overflows uint32", i64) - } - - return reflect.ValueOf(uint32(i64)), nil - case reflect.Uint64: - if i64 < 0 { - return emptyValue, fmt.Errorf("%d overflows uint64", i64) - } - - return reflect.ValueOf(uint64(i64)), nil - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return emptyValue, fmt.Errorf("%d overflows uint", i64) - } - - return reflect.ValueOf(uint(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: reflect.Zero(t), - } - } -} - -// DecodeValue is the ValueDecoder for uint types. 
-func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - elem, err := uic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetUint(elem.Uint()) - return nil -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go deleted file mode 100644 index b1256a4..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. -type ByteSliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -} - -// ByteSliceCodec creates a new *ByteSliceCodecOptions -func ByteSliceCodec() *ByteSliceCodecOptions { - return &ByteSliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { - bs.EncodeNilAsEmpty = &b - return bs -} - -// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. 
-func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { - bs := ByteSliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return bs -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go deleted file mode 100644 index c40973c..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonoptions defines the optional configurations for the BSON codecs. -package bsonoptions diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go deleted file mode 100644 index 6caaa00..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. 
-type EmptyInterfaceCodecOptions struct { - DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. -} - -// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions -func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { - return &EmptyInterfaceCodecOptions{} -} - -// SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. -func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { - e.DecodeBinaryAsSlice = &b - return e -} - -// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. -func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { - e := EmptyInterfaceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeBinaryAsSlice != nil { - e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice - } - } - - return e -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go deleted file mode 100644 index 7a6a880..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// MapCodecOptions represents all possible options for map encoding and decoding. 
-type MapCodecOptions struct { - DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. - EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. - // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must - // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a - // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the - // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override - // TextMarshaler/TextUnmarshaler. Defaults to false. - EncodeKeysWithStringer *bool -} - -// MapCodec creates a new *MapCodecOptions -func MapCodec() *MapCodecOptions { - return &MapCodecOptions{} -} - -// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. -func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { - t.DecodeZerosMap = &b - return t -} - -// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. -func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { - t.EncodeNilAsEmpty = &b - return t -} - -// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the -// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key -// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with -// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer -// will override TextMarshaler/TextUnmarshaler. Defaults to false. 
-func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { - t.EncodeKeysWithStringer = &b - return t -} - -// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. -func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { - s := MapCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeZerosMap != nil { - s.DecodeZerosMap = opt.DecodeZerosMap - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - if opt.EncodeKeysWithStringer != nil { - s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer - } - } - - return s -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go deleted file mode 100644 index ef965e4..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// SliceCodecOptions represents all possible options for slice encoding and decoding. -type SliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -} - -// SliceCodec creates a new *SliceCodecOptions -func SliceCodec() *SliceCodecOptions { - return &SliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. 
-func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { - s.EncodeNilAsEmpty = &b - return s -} - -// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. -func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { - s := SliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return s -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go deleted file mode 100644 index 65964f4..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultDecodeOIDAsHex = true - -// StringCodecOptions represents all possible options for string encoding and decoding. -type StringCodecOptions struct { - DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. -} - -// StringCodec creates a new *StringCodecOptions -func StringCodec() *StringCodecOptions { - return &StringCodecOptions{} -} - -// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made -// from the raw object ID bytes will be used. Defaults to true. 
-func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { - t.DecodeObjectIDAsHex = &b - return t -} - -// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. -func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { - s := &StringCodecOptions{&defaultDecodeOIDAsHex} - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeObjectIDAsHex != nil { - s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex - } - } - - return s -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go deleted file mode 100644 index 78d1dd8..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultOverwriteDuplicatedInlinedFields = true - -// StructCodecOptions represents all possible options for struct encoding and decoding. -type StructCodecOptions struct { - DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. - DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. - EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. - AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. 
- OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. -} - -// StructCodec creates a new *StructCodecOptions -func StructCodec() *StructCodecOptions { - return &StructCodecOptions{} -} - -// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. -func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { - t.DecodeZeroStruct = &b - return t -} - -// SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. -func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { - t.DecodeDeepZeroInline = &b - return t -} - -// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all -// its values set to their default value. Defaults to false. -func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { - t.EncodeOmitDefaultStruct = &b - return t -} - -// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the -// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when -// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if -// there are duplicate keys after the struct is inlined. Defaults to true. -func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { - t.OverwriteDuplicatedInlinedFields = &b - return t -} - -// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. 
-func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { - t.AllowUnexportedFields = &b - return t -} - -// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. -func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { - s := &StructCodecOptions{ - OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, - } - for _, opt := range opts { - if opt == nil { - continue - } - - if opt.DecodeZeroStruct != nil { - s.DecodeZeroStruct = opt.DecodeZeroStruct - } - if opt.DecodeDeepZeroInline != nil { - s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline - } - if opt.EncodeOmitDefaultStruct != nil { - s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct - } - if opt.OverwriteDuplicatedInlinedFields != nil { - s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields - } - if opt.AllowUnexportedFields != nil { - s.AllowUnexportedFields = opt.AllowUnexportedFields - } - } - - return s -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go deleted file mode 100644 index 13496d1..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// TimeCodecOptions represents all possible options for time.Time encoding and decoding. -type TimeCodecOptions struct { - UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. 
-} - -// TimeCodec creates a new *TimeCodecOptions -func TimeCodec() *TimeCodecOptions { - return &TimeCodecOptions{} -} - -// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. -func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { - t.UseLocalTimeZone = &b - return t -} - -// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. -func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { - t := TimeCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.UseLocalTimeZone != nil { - t.UseLocalTimeZone = opt.UseLocalTimeZone - } - } - - return t -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go deleted file mode 100644 index e08b7f1..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// UIntCodecOptions represents all possible options for uint encoding and decoding. -type UIntCodecOptions struct { - EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. -} - -// UIntCodec creates a new *UIntCodecOptions -func UIntCodec() *UIntCodecOptions { - return &UIntCodecOptions{} -} - -// SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. 
-func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { - u.EncodeToMinSize = &b - return u -} - -// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. -func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { - u := UIntCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeToMinSize != nil { - u.EncodeToMinSize = opt.EncodeToMinSize - } - } - - return u -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go deleted file mode 100644 index 5cdf646..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "fmt" - "io" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// Copier is a type that allows copying between ValueReaders, ValueWriters, and -// []byte values. -type Copier struct{} - -// NewCopier creates a new copier with the given registry. If a nil registry is provided -// a default registry is used. -func NewCopier() Copier { - return Copier{} -} - -// CopyDocument handles copying a document from src to dst. -func CopyDocument(dst ValueWriter, src ValueReader) error { - return Copier{}.CopyDocument(dst, src) -} - -// CopyDocument handles copying one document from the src to the dst. 
-func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { - dr, err := src.ReadDocument() - if err != nil { - return err - } - - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - return c.copyDocumentCore(dw, dr) -} - -// CopyArrayFromBytes copies the values from a BSON array represented as a -// []byte to a ValueWriter. -func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { - aw, err := dst.WriteArray() - if err != nil { - return err - } - - err = c.CopyBytesToArrayWriter(aw, src) - if err != nil { - return err - } - - return aw.WriteArrayEnd() -} - -// CopyDocumentFromBytes copies the values from a BSON document represented as a -// []byte to a ValueWriter. -func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - err = c.CopyBytesToDocumentWriter(dw, src) - if err != nil { - return err - } - - return dw.WriteDocumentEnd() -} - -type writeElementFn func(key string) (ValueWriter, error) - -// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an -// ArrayWriter. -func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { - wef := func(_ string) (ValueWriter, error) { - return dst.WriteArrayElement() - } - - return c.copyBytesToValueWriter(src, wef) -} - -// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a -// DocumentWriter. -func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { - wef := func(key string) (ValueWriter, error) { - return dst.WriteDocumentElement(key) - } - - return c.copyBytesToValueWriter(src, wef) -} - -func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. 
- length, rem, ok := bsoncore.ReadLength(src) - if !ok { - return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) - } - if len(src) < int(length) { - return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) - } - rem = rem[:length-4] - - var t bsontype.Type - var key string - var val bsoncore.Value - for { - t, rem, ok = bsoncore.ReadType(rem) - if !ok { - return io.EOF - } - if t == bsontype.Type(0) { - if len(rem) != 0 { - return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) - } - break - } - - key, rem, ok = bsoncore.ReadKey(rem) - if !ok { - return fmt.Errorf("invalid key found. remaining bytes=%v", rem) - } - - // write as either array element or document element using writeElementFn - vw, err := wef(key) - if err != nil { - return err - } - - val, rem, ok = bsoncore.ReadValue(rem, t) - if !ok { - return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t) - } - err = c.CopyValueFromBytes(vw, t, val.Data) - if err != nil { - return err - } - } - return nil -} - -// CopyDocumentToBytes copies an entire document from the ValueReader and -// returns it as bytes. -func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { - return c.AppendDocumentBytes(nil, src) -} - -// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will -// append the result to dst. -func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) - - vw.reset(dst) - - err := c.CopyDocument(vw, src) - dst = vw.buf - return dst, err -} - -// AppendArrayBytes copies an array from the ValueReader to dst. 
-func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) - - vw.reset(dst) - - err := c.copyArray(vw, src) - dst = vw.buf - return dst, err -} - -// CopyValueFromBytes will write the value represtend by t and src to dst. -func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { - if wvb, ok := dst.(BytesWriter); ok { - return wvb.WriteValueBytes(t, src) - } - - vr := vrPool.Get().(*valueReader) - defer vrPool.Put(vr) - - vr.reset(src) - vr.pushElement(t) - - return c.CopyValue(dst, vr) -} - -// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a -// []byte. -func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { - return c.AppendValueBytes(nil, src) -} - -// AppendValueBytes functions the same as CopyValueToBytes, but will append the -// result to dst. -func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { - if br, ok := src.(BytesReader); ok { - return br.ReadValueBytes(dst) - } - - vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) - - start := len(dst) - - vw.reset(dst) - vw.push(mElement) - - err := c.CopyValue(vw, src) - if err != nil { - return 0, dst, err - } - - return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil -} - -// CopyValue will copy a single value from src to dst. 
-func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { - var err error - switch src.Type() { - case bsontype.Double: - var f64 float64 - f64, err = src.ReadDouble() - if err != nil { - break - } - err = dst.WriteDouble(f64) - case bsontype.String: - var str string - str, err = src.ReadString() - if err != nil { - return err - } - err = dst.WriteString(str) - case bsontype.EmbeddedDocument: - err = c.CopyDocument(dst, src) - case bsontype.Array: - err = c.copyArray(dst, src) - case bsontype.Binary: - var data []byte - var subtype byte - data, subtype, err = src.ReadBinary() - if err != nil { - break - } - err = dst.WriteBinaryWithSubtype(data, subtype) - case bsontype.Undefined: - err = src.ReadUndefined() - if err != nil { - break - } - err = dst.WriteUndefined() - case bsontype.ObjectID: - var oid primitive.ObjectID - oid, err = src.ReadObjectID() - if err != nil { - break - } - err = dst.WriteObjectID(oid) - case bsontype.Boolean: - var b bool - b, err = src.ReadBoolean() - if err != nil { - break - } - err = dst.WriteBoolean(b) - case bsontype.DateTime: - var dt int64 - dt, err = src.ReadDateTime() - if err != nil { - break - } - err = dst.WriteDateTime(dt) - case bsontype.Null: - err = src.ReadNull() - if err != nil { - break - } - err = dst.WriteNull() - case bsontype.Regex: - var pattern, options string - pattern, options, err = src.ReadRegex() - if err != nil { - break - } - err = dst.WriteRegex(pattern, options) - case bsontype.DBPointer: - var ns string - var pointer primitive.ObjectID - ns, pointer, err = src.ReadDBPointer() - if err != nil { - break - } - err = dst.WriteDBPointer(ns, pointer) - case bsontype.JavaScript: - var js string - js, err = src.ReadJavascript() - if err != nil { - break - } - err = dst.WriteJavascript(js) - case bsontype.Symbol: - var symbol string - symbol, err = src.ReadSymbol() - if err != nil { - break - } - err = dst.WriteSymbol(symbol) - case bsontype.CodeWithScope: - var code string - var srcScope 
DocumentReader - code, srcScope, err = src.ReadCodeWithScope() - if err != nil { - break - } - - var dstScope DocumentWriter - dstScope, err = dst.WriteCodeWithScope(code) - if err != nil { - break - } - err = c.copyDocumentCore(dstScope, srcScope) - case bsontype.Int32: - var i32 int32 - i32, err = src.ReadInt32() - if err != nil { - break - } - err = dst.WriteInt32(i32) - case bsontype.Timestamp: - var t, i uint32 - t, i, err = src.ReadTimestamp() - if err != nil { - break - } - err = dst.WriteTimestamp(t, i) - case bsontype.Int64: - var i64 int64 - i64, err = src.ReadInt64() - if err != nil { - break - } - err = dst.WriteInt64(i64) - case bsontype.Decimal128: - var d128 primitive.Decimal128 - d128, err = src.ReadDecimal128() - if err != nil { - break - } - err = dst.WriteDecimal128(d128) - case bsontype.MinKey: - err = src.ReadMinKey() - if err != nil { - break - } - err = dst.WriteMinKey() - case bsontype.MaxKey: - err = src.ReadMaxKey() - if err != nil { - break - } - err = dst.WriteMaxKey() - default: - err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) - } - - return err -} - -func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { - ar, err := src.ReadArray() - if err != nil { - return err - } - - aw, err := dst.WriteArray() - if err != nil { - return err - } - - for { - vr, err := ar.ReadValue() - if err == ErrEOA { - break - } - if err != nil { - return err - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { - for { - key, vr, err := dr.ReadElement() - if err == ErrEOD { - break - } - if err != nil { - return err - } - - vw, err := dw.WriteDocumentElement(key) - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git 
a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go deleted file mode 100644 index 750b0d2..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonrw contains abstractions for reading and writing -// BSON and BSON like types from sources. -package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go deleted file mode 100644 index 54c76bf..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -const maxNestingDepth = 200 - -// ErrInvalidJSON indicates the JSON input is invalid -var ErrInvalidJSON = errors.New("invalid JSON input") - -type jsonParseState byte - -const ( - jpsStartState jsonParseState = iota - jpsSawBeginObject - jpsSawEndObject - jpsSawBeginArray - jpsSawEndArray - jpsSawColon - jpsSawComma - jpsSawKey - jpsSawValue - jpsDoneState - jpsInvalidState -) - -type jsonParseMode byte - -const ( - jpmInvalidMode jsonParseMode = iota - jpmObjectMode - jpmArrayMode -) - -type extJSONValue struct { - t bsontype.Type - v interface{} -} - -type extJSONObject struct { - keys []string - values []*extJSONValue -} - -type extJSONParser struct { - js *jsonScanner - s jsonParseState - m []jsonParseMode - k string - v *extJSONValue - - err error - canonical bool - depth int - maxDepth int - - emptyObject bool - relaxedUUID bool -} - -// newExtJSONParser returns a new extended JSON parser, ready to to begin -// parsing from the first character of the argued json input. It will not -// perform any read-ahead and will therefore not report any errors about -// malformed JSON at this point. 
-func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { - return &extJSONParser{ - js: &jsonScanner{r: r}, - s: jpsStartState, - m: []jsonParseMode{}, - canonical: canonical, - maxDepth: maxNestingDepth, - } -} - -// peekType examines the next value and returns its BSON Type -func (ejp *extJSONParser) peekType() (bsontype.Type, error) { - var t bsontype.Type - var err error - initialState := ejp.s - - ejp.advanceState() - switch ejp.s { - case jpsSawValue: - t = ejp.v.t - case jpsSawBeginArray: - t = bsontype.Array - case jpsInvalidState: - err = ejp.err - case jpsSawComma: - // in array mode, seeing a comma means we need to progress again to actually observe a type - if ejp.peekMode() == jpmArrayMode { - return ejp.peekType() - } - case jpsSawEndArray: - // this would only be a valid state if we were in array mode, so return end-of-array error - err = ErrEOA - case jpsSawBeginObject: - // peek key to determine type - ejp.advanceState() - switch ejp.s { - case jpsSawEndObject: // empty embedded document - t = bsontype.EmbeddedDocument - ejp.emptyObject = true - case jpsInvalidState: - err = ejp.err - case jpsSawKey: - if initialState == jpsStartState { - return bsontype.EmbeddedDocument, nil - } - t = wrapperKeyBSONType(ejp.k) - - // if $uuid is encountered, parse as binary subtype 4 - if ejp.k == "$uuid" { - ejp.relaxedUUID = true - t = bsontype.Binary - } - - switch t { - case bsontype.JavaScript: - // just saw $code, need to check for $scope at same level - _, err = ejp.readValue(bsontype.JavaScript) - if err != nil { - break - } - - switch ejp.s { - case jpsSawEndObject: // type is TypeJavaScript - case jpsSawComma: - ejp.advanceState() - - if ejp.s == jpsSawKey && ejp.k == "$scope" { - t = bsontype.CodeWithScope - } else { - err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) - } - case jpsInvalidState: - err = ejp.err - default: - err = ErrInvalidJSON - } - case bsontype.CodeWithScope: - err = 
errors.New("invalid extended JSON: code with $scope must contain $code before $scope") - } - } - } - - return t, err -} - -// readKey parses the next key and its type and returns them -func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { - if ejp.emptyObject { - ejp.emptyObject = false - return "", 0, ErrEOD - } - - // advance to key (or return with error) - switch ejp.s { - case jpsStartState: - ejp.advanceState() - if ejp.s == jpsSawBeginObject { - ejp.advanceState() - } - case jpsSawBeginObject: - ejp.advanceState() - case jpsSawValue, jpsSawEndObject, jpsSawEndArray: - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject, jpsSawComma: - ejp.advanceState() - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsDoneState: - return "", 0, io.EOF - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, ErrInvalidJSON - } - case jpsSawKey: // do nothing (key was peeked before) - default: - return "", 0, invalidRequestError("key") - } - - // read key - var key string - - switch ejp.s { - case jpsSawKey: - key = ejp.k - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, invalidRequestError("key") - } - - // check for colon - ejp.advanceState() - if err := ensureColon(ejp.s, key); err != nil { - return "", 0, err - } - - // peek at the value to determine type - t, err := ejp.peekType() - if err != nil { - return "", 0, err - } - - return key, t, nil -} - -// readValue returns the value corresponding to the Type returned by peekType -func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { - if ejp.s == jpsInvalidState { - return nil, ejp.err - } - - var v *extJSONValue - - switch t { - case bsontype.Null, bsontype.Boolean, bsontype.String: - if ejp.s != jpsSawValue { - return nil, invalidRequestError(t.String()) - } - v = ejp.v - case bsontype.Int32, bsontype.Int64, bsontype.Double: - // relaxed version allows these to be literal number 
values - if ejp.s == jpsSawValue { - v = ejp.v - break - } - fallthrough - case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { - return nil, invalidJSONErrorForType("value", t) - } - - v = ejp.v - - // read end object - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("} after value", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: - if ejp.s != jpsSawKey { - return nil, invalidRequestError(t.String()) - } - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - if t == bsontype.Binary && ejp.s == jpsSawValue { - // convert relaxed $uuid format - if ejp.relaxedUUID { - defer func() { ejp.relaxedUUID = false }() - uuid, err := ejp.v.parseSymbol() - if err != nil { - return nil, err - } - - // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing - // in the 8th, 13th, 18th, and 23rd characters. 
- // - // See https://tools.ietf.org/html/rfc4122#section-3 - valid := len(uuid) == 36 && - string(uuid[8]) == "-" && - string(uuid[13]) == "-" && - string(uuid[18]) == "-" && - string(uuid[23]) == "-" - if !valid { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // remove hyphens - uuidNoHyphens := strings.Replace(uuid, "-", "", -1) - if len(uuidNoHyphens) != 32 { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // convert hex to bytes - bytes, err := hex.DecodeString(uuidNoHyphens) - if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) - } - - base64 := &extJSONValue{ - t: bsontype.String, - v: base64.StdEncoding.EncodeToString(bytes), - } - subType := &extJSONValue{ - t: bsontype.String, - v: "04", - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - - break - } - - // convert legacy $binary format - base64 := ejp.v - - ejp.advanceState() - if ejp.s != jpsSawComma { - return nil, invalidJSONErrorForType(",", bsontype.Binary) - } - - ejp.advanceState() - key, t, err := ejp.readKey() - if err != nil { - return nil, err - } - if key != "$type" { - return nil, invalidJSONErrorForType("$type", bsontype.Binary) - } - - subType, err := ejp.readValue(t) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - break - } - - // read KV 
pairs - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONErrorForType("{", t) - } - - keys, vals, err := ejp.readObject(2, true) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) - } - - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - - case bsontype.DateTime: - switch ejp.s { - case jpsSawValue: - v = ejp.v - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject: - keys, vals, err := ejp.readObject(1, true) - if err != nil { - return nil, err - } - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - case jpsSawValue: - if ejp.canonical { - return nil, invalidJSONError("{") - } - v = ejp.v - default: - if ejp.canonical { - return nil, invalidJSONErrorForType("object", t) - } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("value and then }", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.JavaScript: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue { - return nil, invalidJSONErrorForType("value", t) - } - v = ejp.v - - // read end object or comma and just return - ejp.advanceState() - case jpsSawEndObject: - v = ejp.v - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.CodeWithScope: - if ejp.s == jpsSawKey && ejp.k == "$scope" { - v = ejp.v // this is the $code string from earlier - - // read colon - ejp.advanceState() - if err := 
ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONError("$scope to be embedded document") - } - } else { - return nil, invalidRequestError(t.String()) - } - case bsontype.EmbeddedDocument, bsontype.Array: - return nil, invalidRequestError(t.String()) - } - - return v, nil -} - -// readObject is a utility method for reading full objects of known (or expected) size -// it is useful for extended JSON types such as binary, datetime, regex, and timestamp -func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { - keys := make([]string, numKeys) - vals := make([]*extJSONValue, numKeys) - - if !started { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, nil, invalidJSONError("{") - } - } - - for i := 0; i < numKeys; i++ { - key, t, err := ejp.readKey() - if err != nil { - return nil, nil, err - } - - switch ejp.s { - case jpsSawKey: - v, err := ejp.readValue(t) - if err != nil { - return nil, nil, err - } - - keys[i] = key - vals[i] = v - case jpsSawValue: - keys[i] = key - vals[i] = ejp.v - default: - return nil, nil, invalidJSONError("value") - } - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, nil, invalidJSONError("}") - } - - return keys, vals, nil -} - -// advanceState reads the next JSON token from the scanner and transitions -// from the current state based on that token's type -func (ejp *extJSONParser) advanceState() { - if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { - return - } - - jt, err := ejp.js.nextToken() - - if err != nil { - ejp.err = err - ejp.s = jpsInvalidState - return - } - - valid := ejp.validateToken(jt.t) - if !valid { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - return - } - - switch jt.t { - case jttBeginObject: - ejp.s = jpsSawBeginObject - ejp.pushMode(jpmObjectMode) - ejp.depth++ - - if ejp.depth > ejp.maxDepth { - ejp.err 
= nestingDepthError(jt.p, ejp.depth) - ejp.s = jpsInvalidState - } - case jttEndObject: - ejp.s = jpsSawEndObject - ejp.depth-- - - if ejp.popMode() != jpmObjectMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttBeginArray: - ejp.s = jpsSawBeginArray - ejp.pushMode(jpmArrayMode) - case jttEndArray: - ejp.s = jpsSawEndArray - - if ejp.popMode() != jpmArrayMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttColon: - ejp.s = jpsSawColon - case jttComma: - ejp.s = jpsSawComma - case jttEOF: - ejp.s = jpsDoneState - if len(ejp.m) != 0 { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttString: - switch ejp.s { - case jpsSawComma: - if ejp.peekMode() == jpmArrayMode { - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - return - } - fallthrough - case jpsSawBeginObject: - ejp.s = jpsSawKey - ejp.k = jt.v.(string) - return - } - fallthrough - default: - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - } -} - -var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ - jpsStartState: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - jttEOF: true, - }, - jpsSawBeginObject: { - jttEndObject: true, - jttString: true, - }, - jpsSawEndObject: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawBeginArray: { - jttBeginObject: true, - jttBeginArray: true, - jttEndArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawEndArray: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawColon: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawComma: { - jttBeginObject: true, - jttBeginArray: true, 
- jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawKey: { - jttColon: true, - }, - jpsSawValue: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsDoneState: {}, - jpsInvalidState: {}, -} - -func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { - switch ejp.s { - case jpsSawEndObject: - // if we are at depth zero and the next token is a '{', - // we can consider it valid only if we are not in array mode. - if jtt == jttBeginObject && ejp.depth == 0 { - return ejp.peekMode() != jpmArrayMode - } - case jpsSawComma: - switch ejp.peekMode() { - // the only valid next token after a comma inside a document is a string (a key) - case jpmObjectMode: - return jtt == jttString - case jpmInvalidMode: - return false - } - } - - _, ok := jpsValidTransitionTokens[ejp.s][jtt] - return ok -} - -// ensureExtValueType returns true if the current value has the expected -// value type for single-key extended JSON types. 
For example, -// {"$numberInt": v} v must be TypeString -func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { - switch t { - case bsontype.MinKey, bsontype.MaxKey: - return ejp.v.t == bsontype.Int32 - case bsontype.Undefined: - return ejp.v.t == bsontype.Boolean - case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: - return ejp.v.t == bsontype.String - default: - return false - } -} - -func (ejp *extJSONParser) pushMode(m jsonParseMode) { - ejp.m = append(ejp.m, m) -} - -func (ejp *extJSONParser) popMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - m := ejp.m[l-1] - ejp.m = ejp.m[:l-1] - - return m -} - -func (ejp *extJSONParser) peekMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - return ejp.m[l-1] -} - -func extendJSONToken(jt *jsonToken) *extJSONValue { - var t bsontype.Type - - switch jt.t { - case jttInt32: - t = bsontype.Int32 - case jttInt64: - t = bsontype.Int64 - case jttDouble: - t = bsontype.Double - case jttString: - t = bsontype.String - case jttBool: - t = bsontype.Boolean - case jttNull: - t = bsontype.Null - default: - return nil - } - - return &extJSONValue{t: t, v: jt.v} -} - -func ensureColon(s jsonParseState, key string) error { - if s != jpsSawColon { - return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) - } - - return nil -} - -func invalidRequestError(s string) error { - return fmt.Errorf("invalid request to read %s", s) -} - -func invalidJSONError(expected string) error { - return fmt.Errorf("invalid JSON input; expected %s", expected) -} - -func invalidJSONErrorForType(expected string, t bsontype.Type) error { - return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) -} - -func unexpectedTokenError(jt *jsonToken) error { - switch jt.t { - case jttInt32, jttInt64, jttDouble: - return fmt.Errorf("invalid JSON input; unexpected number (%v) at position 
%d", jt.v, jt.p) - case jttString: - return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) - case jttBool: - return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) - case jttNull: - return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) - case jttEOF: - return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) - default: - return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) - } -} - -func nestingDepthError(p, depth int) error { - return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go deleted file mode 100644 index 35832d7..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "fmt" - "io" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. -type ExtJSONValueReaderPool struct { - pool sync.Pool -} - -// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. 
-func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { - return &ExtJSONValueReaderPool{ - pool: sync.Pool{ - New: func() interface{} { - return new(extJSONValueReader) - }, - }, - } -} - -// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. -func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { - vr := bvrp.pool.Get().(*extJSONValueReader) - return vr.reset(r, canonical) -} - -// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing -// is inserted into the pool and ok will be false. -func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { - bvr, ok := vr.(*extJSONValueReader) - if !ok { - return false - } - - bvr, _ = bvr.reset(nil, false) - bvrp.pool.Put(bvr) - return true -} - -type ejvrState struct { - mode mode - vType bsontype.Type - depth int -} - -// extJSONValueReader is for reading extended JSON. -type extJSONValueReader struct { - p *extJSONParser - - stack []ejvrState - frame int -} - -// NewExtJSONValueReader creates a new ValueReader from a given io.Reader -// It will interpret the JSON of r as canonical or relaxed according to the -// given canonical flag -func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { - return newExtJSONValueReader(r, canonical) -} - -func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { - ejvr := new(extJSONValueReader) - return ejvr.reset(r, canonical) -} - -func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { - p := newExtJSONParser(r, canonical) - typ, err := p.peekType() - - if err != nil { - return nil, ErrInvalidJSON - } - - var m mode - switch typ { - case bsontype.EmbeddedDocument: - m = mTopLevel - case bsontype.Array: - m = mArray - default: - m = mValue - } - - stack := make([]ejvrState, 1, 5) - stack[0] = ejvrState{ - mode: m, - vType: typ, - } - return &extJSONValueReader{ - p: p, - 
stack: stack, - }, nil -} - -func (ejvr *extJSONValueReader) advanceFrame() { - if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack - length := len(ejvr.stack) - if length+1 >= cap(ejvr.stack) { - // double it - buf := make([]ejvrState, 2*cap(ejvr.stack)+1) - copy(buf, ejvr.stack) - ejvr.stack = buf - } - ejvr.stack = ejvr.stack[:length+1] - } - ejvr.frame++ - - // Clean the stack - ejvr.stack[ejvr.frame].mode = 0 - ejvr.stack[ejvr.frame].vType = 0 - ejvr.stack[ejvr.frame].depth = 0 -} - -func (ejvr *extJSONValueReader) pushDocument() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mDocument - ejvr.stack[ejvr.frame].depth = ejvr.p.depth -} - -func (ejvr *extJSONValueReader) pushCodeWithScope() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mCodeWithScope -} - -func (ejvr *extJSONValueReader) pushArray() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mArray -} - -func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = m - ejvr.stack[ejvr.frame].vType = t -} - -func (ejvr *extJSONValueReader) pop() { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - ejvr.frame-- - case mDocument, mArray, mCodeWithScope: - ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... - } -} - -func (ejvr *extJSONValueReader) skipObject() { - // read entire object until depth returns to 0 (last ending } or ] seen) - depth := 1 - for depth > 0 { - ejvr.p.advanceState() - - // If object is empty, raise depth and continue. When emptyObject is true, the - // parser has already read both the opening and closing brackets of an empty - // object ("{}"), so the next valid token will be part of the parent document, - // not part of the nested document. - // - // If there is a comma, there are remaining fields, emptyObject must be set back - // to false, and comma must be skipped with advanceState(). 
- if ejvr.p.emptyObject { - if ejvr.p.s == jpsSawComma { - ejvr.p.emptyObject = false - ejvr.p.advanceState() - } - depth-- - continue - } - - switch ejvr.p.s { - case jpsSawBeginObject, jpsSawBeginArray: - depth++ - case jpsSawEndObject, jpsSawEndArray: - depth-- - } - } -} - -func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { - te := TransitionError{ - name: name, - current: ejvr.stack[ejvr.frame].mode, - destination: destination, - modes: modes, - action: "read", - } - if ejvr.frame != 0 { - te.parent = ejvr.stack[ejvr.frame-1].mode - } - return te -} - -func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { - return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) -} - -func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != t { - return ejvr.typeError(t) - } - default: - modes := []mode{mElement, mValue} - if addModes != nil { - modes = append(modes, addModes...) 
- } - return ejvr.invalidTransitionErr(destination, callerName, modes) - } - - return nil -} - -func (ejvr *extJSONValueReader) Type() bsontype.Type { - return ejvr.stack[ejvr.frame].vType -} - -func (ejvr *extJSONValueReader) Skip() error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - default: - return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) - } - - defer ejvr.pop() - - t := ejvr.stack[ejvr.frame].vType - switch t { - case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: - // read entire array, doc or CodeWithScope - ejvr.skipObject() - default: - _, err := ejvr.p.readValue(t) - if err != nil { - return err - } - } - - return nil -} - -func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: // allow reading array from top level - case mArray: - return ejvr, nil - default: - if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { - return nil, err - } - } - - ejvr.pushArray() - - return ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { - if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { - return nil, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Binary) - if err != nil { - return nil, 0, err - } - - b, btype, err = v.parseBinary() - - ejvr.pop() - return b, btype, err -} - -func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { - if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { - return false, err - } - - v, err := ejvr.p.readValue(bsontype.Boolean) - if err != nil { - return false, err - } - - if v.t != bsontype.Boolean { - return false, fmt.Errorf("expected type bool, but got type %s", v.t) - } - - ejvr.pop() - return v.v.(bool), nil -} - -func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case 
mTopLevel: - return ejvr, nil - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { - return nil, ejvr.typeError(bsontype.EmbeddedDocument) - } - - ejvr.pushDocument() - return ejvr, nil - default: - return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) - } -} - -func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { - if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { - return "", nil, err - } - - v, err := ejvr.p.readValue(bsontype.CodeWithScope) - if err != nil { - return "", nil, err - } - - code, err = v.parseJavascript() - - ejvr.pushCodeWithScope() - return code, ejvr, err -} - -func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { - if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { - return "", primitive.NilObjectID, err - } - - v, err := ejvr.p.readValue(bsontype.DBPointer) - if err != nil { - return "", primitive.NilObjectID, err - } - - ns, oid, err = v.parseDBPointer() - - ejvr.pop() - return ns, oid, err -} - -func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.DateTime) - if err != nil { - return 0, err - } - - d, err := v.parseDateTime() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) { - if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { - return primitive.Decimal128{}, err - } - - v, err := ejvr.p.readValue(bsontype.Decimal128) - if err != nil { - return primitive.Decimal128{}, err - } - - d, err := v.parseDecimal128() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { - if err := 
ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Double) - if err != nil { - return 0, err - } - - d, err := v.parseDouble() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { - if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int32) - if err != nil { - return 0, err - } - - i, err := v.parseInt32() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int64) - if err != nil { - return 0, err - } - - i, err := v.parseInt64() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { - if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.JavaScript) - if err != nil { - return "", err - } - - code, err = v.parseJavascript() - - ejvr.pop() - return code, err -} - -func (ejvr *extJSONValueReader) ReadMaxKey() error { - if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MaxKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("max") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadMinKey() error { - if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MinKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("min") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadNull() error { - if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { - return err - } - - v, err := 
ejvr.p.readValue(bsontype.Null) - if err != nil { - return err - } - - if v.t != bsontype.Null { - return fmt.Errorf("expected type null but got type %s", v.t) - } - - ejvr.pop() - return nil -} - -func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { - if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { - return primitive.ObjectID{}, err - } - - v, err := ejvr.p.readValue(bsontype.ObjectID) - if err != nil { - return primitive.ObjectID{}, err - } - - oid, err := v.parseObjectID() - - ejvr.pop() - return oid, err -} - -func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { - if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { - return "", "", err - } - - v, err := ejvr.p.readValue(bsontype.Regex) - if err != nil { - return "", "", err - } - - pattern, options, err = v.parseRegex() - - ejvr.pop() - return pattern, options, err -} - -func (ejvr *extJSONValueReader) ReadString() (string, error) { - if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.String) - if err != nil { - return "", err - } - - if v.t != bsontype.String { - return "", fmt.Errorf("expected type string but got type %s", v.t) - } - - ejvr.pop() - return v.v.(string), nil -} - -func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { - if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.Symbol) - if err != nil { - return "", err - } - - symbol, err = v.parseSymbol() - - ejvr.pop() - return symbol, err -} - -func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { - if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { - return 0, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Timestamp) - if err != nil { - return 0, 0, err - } - 
- t, i, err = v.parseTimestamp() - - ejvr.pop() - return t, i, err -} - -func (ejvr *extJSONValueReader) ReadUndefined() error { - if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.Undefined) - if err != nil { - return err - } - - err = v.parseUndefined() - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel, mDocument, mCodeWithScope: - default: - return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) - } - - name, t, err := ejvr.p.readKey() - - if err != nil { - if err == ErrEOD { - if ejvr.stack[ejvr.frame].mode == mCodeWithScope { - _, err := ejvr.p.peekType() - if err != nil { - return "", nil, err - } - } - - ejvr.pop() - } - - return "", nil, err - } - - ejvr.push(mElement, t) - return name, ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mArray: - default: - return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) - } - - t, err := ejvr.p.peekType() - if err != nil { - if err == ErrEOA { - ejvr.pop() - } - - return nil, err - } - - ejvr.push(mValue, t) - return ejvr, nil -} diff --git a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go deleted file mode 100644 index ba39c96..0000000 --- a/backend/services/controller/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. - -package bsonrw - -import "unicode/utf8" - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). -var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// 
inside of HTML