diff --git a/go.mod b/go.mod
index 2ec6eca747..a0e3ec4fed 100644
--- a/go.mod
+++ b/go.mod
@@ -71,7 +71,6 @@ require (
github.com/pkg/errors v0.9.1
github.com/pkg/xattr v0.4.12
github.com/prometheus/client_golang v1.23.2
- github.com/r3labs/sse/v2 v2.10.0
github.com/riandyrn/otelchi v0.12.2
github.com/rogpeppe/go-internal v1.14.1
github.com/rs/cors v1.11.1
@@ -89,6 +88,7 @@ require (
github.com/thejerf/suture/v4 v4.0.6
github.com/tidwall/gjson v1.18.0
github.com/tidwall/sjson v1.2.5
+ github.com/tmaxmax/go-sse v0.11.0
github.com/tus/tusd/v2 v2.9.2
github.com/unrolled/secure v1.16.0
github.com/vmihailenco/msgpack/v5 v5.4.1
@@ -394,7 +394,6 @@ require (
golang.org/x/tools v0.42.0 // indirect
google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d // indirect
- gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
diff --git a/go.sum b/go.sum
index 6fcc4aa94b..2dc79c7bcb 100644
--- a/go.sum
+++ b/go.sum
@@ -1059,8 +1059,6 @@ github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9
github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0=
github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/r3labs/sse/v2 v2.10.0 h1:hFEkLLFY4LDifoHdiCN/LlGBAdVJYsANaLqNYa1l/v0=
-github.com/r3labs/sse/v2 v2.10.0/go.mod h1:Igau6Whc+F17QUgML1fYe1VPZzTV6EMCnYktEmkNJ7I=
github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKcyumwBO6qip7RNQ5r77yrssm9bfCowcLEBcU5IA=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
@@ -1217,6 +1215,8 @@ github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYI
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
+github.com/tmaxmax/go-sse v0.11.0 h1:nogmJM6rJUoOLoAwEKeQe5XlVpt9l7N82SS1jI7lWFg=
+github.com/tmaxmax/go-sse v0.11.0/go.mod h1:u/2kZQR1tyngo1lKaNCj1mJmhXGZWS1Zs5yiSOD+Eg8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 h1:PM5hJF7HVfNWmCjMdEfbuOBNXSVF2cMFGgQTPdKCbwM=
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208/go.mod h1:BzWtXXrXzZUvMacR0oF/fbDDgUPO8L36tDMmRAf14ns=
@@ -1420,7 +1420,6 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191116160921-f9c825593386/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1747,8 +1746,6 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y=
-gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/services/sse/pkg/server/http/server.go b/services/sse/pkg/server/http/server.go
index f0d82e506c..8735372896 100644
--- a/services/sse/pkg/server/http/server.go
+++ b/services/sse/pkg/server/http/server.go
@@ -2,12 +2,16 @@ package http
import (
"fmt"
-
stdhttp "net/http"
"github.com/go-chi/chi/v5"
chimiddleware "github.com/go-chi/chi/v5/middleware"
"github.com/google/uuid"
+ "github.com/riandyrn/otelchi"
+ "go-micro.dev/v4"
+
+ "github.com/opencloud-eu/reva/v2/pkg/events"
+
"github.com/opencloud-eu/opencloud/pkg/account"
"github.com/opencloud-eu/opencloud/pkg/cors"
"github.com/opencloud-eu/opencloud/pkg/middleware"
@@ -15,9 +19,6 @@ import (
"github.com/opencloud-eu/opencloud/pkg/tracing"
"github.com/opencloud-eu/opencloud/pkg/version"
svc "github.com/opencloud-eu/opencloud/services/sse/pkg/service"
- "github.com/opencloud-eu/reva/v2/pkg/events"
- "github.com/riandyrn/otelchi"
- "go-micro.dev/v4"
)
// Service is the service interface
@@ -82,12 +83,17 @@ func Server(opts ...Option) (http.Service, error) {
return http.Service{}, err
}
- handle, err := svc.NewSSE(options.Config, options.Logger, ch, mux)
+ sseHandler, err := svc.NewSSEHandler(options.Context, options.Config, options.Logger, ch)
+ if err != nil {
+ return http.Service{}, err
+ }
+
+ svcHandler, err := svc.New(mux, sseHandler)
if err != nil {
return http.Service{}, err
}
- if err := micro.RegisterHandler(service.Server(), handle); err != nil {
+ if err := micro.RegisterHandler(service.Server(), svcHandler); err != nil {
return http.Service{}, err
}
diff --git a/services/sse/pkg/service/service.go b/services/sse/pkg/service/service.go
index 97a211e215..1657aabec2 100644
--- a/services/sse/pkg/service/service.go
+++ b/services/sse/pkg/service/service.go
@@ -2,102 +2,22 @@ package service
import (
"net/http"
- "time"
"github.com/go-chi/chi/v5"
- "github.com/r3labs/sse/v2"
-
- revactx "github.com/opencloud-eu/reva/v2/pkg/ctx"
- "github.com/opencloud-eu/reva/v2/pkg/events"
-
- "github.com/opencloud-eu/opencloud/pkg/log"
- "github.com/opencloud-eu/opencloud/services/sse/pkg/config"
)
-// SSE defines implements the business logic for Service.
-type SSE struct {
- c *config.Config
- l log.Logger
- m *chi.Mux
- sse *sse.Server
- evChannel <-chan events.Event
-}
-
-// NewSSE returns a service implementation for Service.
-func NewSSE(c *config.Config, l log.Logger, ch <-chan events.Event, mux *chi.Mux) (SSE, error) {
- s := SSE{
- c: c,
- l: l,
- m: mux,
- sse: sse.New(),
- evChannel: ch,
- }
- mux.Route("/ocs/v2.php/apps/notifications/api/v1/notifications", func(r chi.Router) {
- r.Get("/sse", s.HandleSSE)
- })
-
- go s.ListenForEvents()
-
- return s, nil
+type Service struct {
+ handler http.Handler
}
-// ServeHTTP fulfills Handler interface
-func (s SSE) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- s.m.ServeHTTP(w, r)
-}
+func New(mux *chi.Mux, sseHandler http.Handler) (Service, error) {
+ mux.Get("/ocs/v2.php/apps/notifications/api/v1/notifications/sse", sseHandler.ServeHTTP)
-// ListenForEvents listens for events
-func (s SSE) ListenForEvents() {
- for e := range s.evChannel {
- switch ev := e.Event.(type) {
- default:
- s.l.Error().Interface("event", ev).Msg("unhandled event")
- case events.SendSSE:
- for _, uid := range ev.UserIDs {
- s.sse.Publish(uid, &sse.Event{
- Event: []byte(ev.Type),
- Data: ev.Message,
- })
- }
- }
- }
+ return Service{
+ handler: mux,
+ }, nil
}
-// HandleSSE is the GET handler for events
-func (s SSE) HandleSSE(w http.ResponseWriter, r *http.Request) {
- u, ok := revactx.ContextGetUser(r.Context())
- if !ok {
- s.l.Error().Msg("sse: no user in context")
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
-
- uid := u.GetId().GetOpaqueId()
- if uid == "" {
- s.l.Error().Msg("sse: user in context is broken")
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
-
- stream := s.sse.CreateStream(uid)
- stream.AutoReplay = false
-
- if s.c.KeepAliveInterval != 0 {
- ticker := time.NewTicker(s.c.KeepAliveInterval)
- defer ticker.Stop()
- go func() {
- for range ticker.C {
- s.sse.Publish(uid, &sse.Event{
- Comment: []byte("keepalive"),
- })
- }
- }()
- }
-
- // add stream to URL
- q := r.URL.Query()
- q.Set("stream", uid)
- r.URL.RawQuery = q.Encode()
-
- s.sse.ServeHTTP(w, r)
+func (s Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ s.handler.ServeHTTP(w, r)
}
diff --git a/services/sse/pkg/service/sse.go b/services/sse/pkg/service/sse.go
new file mode 100644
index 0000000000..92c65b643d
--- /dev/null
+++ b/services/sse/pkg/service/sse.go
@@ -0,0 +1,132 @@
+package service
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/tmaxmax/go-sse"
+
+	revactx "github.com/opencloud-eu/reva/v2/pkg/ctx"
+	"github.com/opencloud-eu/reva/v2/pkg/events"
+
+	"github.com/opencloud-eu/opencloud/pkg/log"
+	"github.com/opencloud-eu/opencloud/services/sse/pkg/config"
+)
+
+const (
+	// SSETopicAllUsers is the shared topic every connected user is subscribed to.
+	SSETopicAllUsers = "all"
+
+	// shutdownTimeout bounds how long Shutdown waits for open connections to drain.
+	shutdownTimeout = 10 * time.Second
+)
+
+// SSEHandler implements the server-sent-events business logic for the Service.
+type SSEHandler struct {
+	conf    *config.Config
+	logger  log.Logger
+	server  *sse.Server
+	channel <-chan events.Event
+}
+
+// NewSSEHandler returns a service implementation for Service.
+func NewSSEHandler(ctx context.Context, conf *config.Config, logger log.Logger, ch <-chan events.Event) (SSEHandler, error) {
+	handler := SSEHandler{
+		conf:    conf,
+		logger:  logger,
+		channel: ch,
+	}
+
+	handler.server = &sse.Server{
+		OnSession: func(_ http.ResponseWriter, r *http.Request) (topics []string, allowed bool) {
+			return handler.topics(r)
+		},
+	}
+
+	go func() {
+		<-ctx.Done()
+		// ctx is already canceled at this point; Shutdown needs a live
+		// context or it cannot wait for open connections to drain.
+		sctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
+		defer cancel()
+		if err := handler.server.Shutdown(sctx); err != nil {
+			logger.Error().Err(err).Msg("failed to shutdown SSE handler")
+		}
+	}()
+
+	go handler.listen()
+
+	return handler, nil
+}
+
+// ServeHTTP fulfills Handler interface
+func (h SSEHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	topics, ok := h.topics(r)
+	if !ok {
+		h.logger.Error().Msg("sse: failed to get topics")
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+
+	if h.conf.KeepAliveInterval != 0 {
+		ticker := time.NewTicker(h.conf.KeepAliveInterval)
+		defer ticker.Stop()
+		// The user-specific topic is always the last one returned by topics().
+		// Publishing keep-alives to SSETopicAllUsers would fan them out to
+		// every connected client once per open connection.
+		userTopic := topics[len(topics)-1]
+		done := r.Context().Done()
+		go func() {
+			for {
+				select {
+				case <-done:
+					// connection closed; ticker.Stop() does not close the
+					// channel, so exit explicitly to avoid a goroutine leak
+					return
+				case <-ticker.C:
+					m := &sse.Message{}
+					m.AppendData("keep-alive")
+					if err := h.server.Publish(m, userTopic); err != nil {
+						h.logger.Error().Err(err).Msg("sse: failed to publish message")
+					}
+				}
+			}
+		}()
+	}
+
+	h.server.ServeHTTP(w, r)
+}
+
+// listen forwards SendSSE events from the event channel to subscribed clients.
+func (h SSEHandler) listen() {
+	for e := range h.channel {
+		switch ev := e.Event.(type) {
+		default:
+			h.logger.Error().Interface("event", ev).Msg("unhandled event")
+		case events.SendSSE:
+			m := &sse.Message{
+				Type: sse.Type(ev.Type),
+			}
+			m.AppendData(string(ev.Message))
+			if err := h.server.Publish(m, ev.UserIDs...); err != nil {
+				h.logger.Error().Err(err).Msg("sse: failed to publish message")
+			}
+		}
+	}
+}
+
+// topics derives the subscription topics for the request's authenticated user:
+// the shared SSETopicAllUsers topic plus the user's own id topic. It reports
+// false when no valid user is present in the request context.
+func (h SSEHandler) topics(r *http.Request) ([]string, bool) {
+	u, ok := revactx.ContextGetUser(r.Context())
+	if !ok {
+		return nil, false
+	}
+
+	uid := u.GetId().GetOpaqueId()
+	if uid == "" {
+		return nil, false
+	}
+
+	return []string{SSETopicAllUsers, uid}, true
+}
diff --git a/services/sse/pkg/service/sse_test.go b/services/sse/pkg/service/sse_test.go
new file mode 100644
index 0000000000..bb462b3ab4
--- /dev/null
+++ b/services/sse/pkg/service/sse_test.go
@@ -0,0 +1,109 @@
+package service_test
+
+import (
+	"bufio"
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
+	revaContext "github.com/opencloud-eu/reva/v2/pkg/ctx"
+	"github.com/opencloud-eu/reva/v2/pkg/events"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/opencloud-eu/opencloud/pkg/log"
+	"github.com/opencloud-eu/opencloud/services/sse/pkg/config"
+	"github.com/opencloud-eu/opencloud/services/sse/pkg/service"
+)
+
+func TestNewSSEHandler(t *testing.T) {
+	eventChan := make(chan events.Event)
+	defer close(eventChan)
+
+	t.Run("initialization", func(t *testing.T) {
+		_, err := service.NewSSEHandler(context.Background(), &config.Config{}, log.NopLogger(), eventChan)
+		assert.NoError(t, err)
+	})
+}
+
+func TestSSEHandler_ServeHTTP(t *testing.T) {
+	eventChan := make(chan events.Event)
+	defer close(eventChan)
+
+	// A short keep-alive interval doubles as a connection-established signal:
+	// receiving the first keep-alive proves the subscription is registered.
+	conf := &config.Config{KeepAliveInterval: 10 * time.Millisecond}
+	handler, _ := service.NewSSEHandler(context.Background(), conf, log.NopLogger(), eventChan)
+
+	t.Run("fails without user topic", func(t *testing.T) {
+		req := httptest.NewRequest(http.MethodGet, "/", nil)
+
+		rr := httptest.NewRecorder()
+		handler.ServeHTTP(rr, req)
+
+		assert.Equal(t, http.StatusInternalServerError, rr.Code)
+	})
+
+	t.Run("handles sse events", func(t *testing.T) {
+		ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			ctx := revaContext.ContextSetUser(r.Context(), &userv1beta1.User{
+				Id: &userv1beta1.UserId{
+					OpaqueId: "user_1",
+				},
+			})
+
+			handler.ServeHTTP(w, r.WithContext(ctx))
+		}))
+		defer ts.Close()
+
+		req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, ts.URL, nil)
+		require.NoError(t, err)
+
+		resp, err := http.DefaultClient.Do(req)
+		require.NoError(t, err)
+		defer func() {
+			_ = resp.Body.Close()
+		}()
+
+		assert.Equal(t, http.StatusOK, resp.StatusCode)
+
+		reader := bufio.NewReader(resp.Body)
+
+		// readData returns the payload of the next "data: " line, skipping
+		// event-type lines and blank separators.
+		readData := func() string {
+			for {
+				line, err := reader.ReadString('\n')
+				require.NoError(t, err)
+				if strings.HasPrefix(line, "data: ") {
+					return strings.TrimSpace(strings.TrimPrefix(line, "data: "))
+				}
+			}
+		}
+
+		// Wait for the first keep-alive before publishing, so the events
+		// below cannot race the server-side subscription registration.
+		require.Equal(t, "keep-alive", readData())
+
+		for _, ev := range []events.SendSSE{
+			{UserIDs: []string{"user_1"}, Type: "whatever", Message: []byte("u1_m1")},
+			{UserIDs: []string{"user_1"}, Type: "whatever", Message: []byte("u1_m2")},
+			{UserIDs: []string{"user_2"}, Type: "whatever", Message: []byte("u2_m1")},
+			{UserIDs: []string{"all"}, Type: "whatever", Message: []byte("all_m1")},
+		} {
+			eventChan <- events.Event{Event: ev}
+		}
+
+		var messages []string
+		for len(messages) < 3 {
+			if payload := readData(); payload != "keep-alive" {
+				messages = append(messages, payload)
+			}
+		}
+
+		assert.Equal(t, []string{"u1_m1", "u1_m2", "all_m1"}, messages)
+	})
+}
diff --git a/vendor/github.com/r3labs/sse/v2/.gitignore b/vendor/github.com/r3labs/sse/v2/.gitignore
deleted file mode 100644
index d48c759d6c..0000000000
--- a/vendor/github.com/r3labs/sse/v2/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.idea
-.vscode
\ No newline at end of file
diff --git a/vendor/github.com/r3labs/sse/v2/.golangci.yml b/vendor/github.com/r3labs/sse/v2/.golangci.yml
deleted file mode 100644
index 5a76e9a0fd..0000000000
--- a/vendor/github.com/r3labs/sse/v2/.golangci.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-linters:
- enable-all: true
- disable:
- - gofmt
- - gofumpt
- - goimports
- - golint # deprecated
- - interfacer # deprecated
- - maligned # deprecated
- - scopelint # deprecated
- - varnamelen
-
-linters-settings:
- govet:
- enable-all: true
diff --git a/vendor/github.com/r3labs/sse/v2/CONTRIBUTING.md b/vendor/github.com/r3labs/sse/v2/CONTRIBUTING.md
deleted file mode 100644
index b9c7859d3c..0000000000
--- a/vendor/github.com/r3labs/sse/v2/CONTRIBUTING.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Contributing guidelines
-
-Looking to contribute something to this project? Here's how you can help:
-
-Please take a moment to review this document in order to make the contribution process easy and effective for everyone involved.
-
-Following these guidelines helps to communicate that you respect the time of the developers managing and developing this open source project. In return, they should reciprocate that respect in addressing your issue or assessing patches and features.
-
-We also have a [code of conduct](https://ernest.io/conduct).
-
-## Using the issue tracker
-
-The issue tracker is the preferred channel for [bug reports](#bug-reports), [features requests](#feature-requests) and [submitting pull requests](#pull-requests), but please respect the following restrictions:
-
-* Please **do not** use the issue tracker for personal support requests.
-
-* Please **do not** derail issues. Keep the discussion on topic and
- respect the opinions of others.
-
-
-## Bug reports
-
-A bug is a _demonstrable problem_ that is caused by the code in the repository.
-Good bug reports are extremely helpful - thank you!
-
-Guidelines for bug reports:
-
-1. **Use the GitHub issue search** — check if the issue has already been
- reported.
-
-2. **Check if the issue has been fixed** — try to reproduce it using the
- latest `master` or `develop` branch in the repository.
-
-3. **Isolate the problem** — create a reduced test case and a live example.
-
-A good bug report shouldn't leave others needing to chase you up for more
-information. Please try to be as detailed as possible in your report. What is
-your environment? What steps will reproduce the issue? Which environment experience the problem? What would you expect to be the outcome? All these
-details will help people to fix any potential bugs.
-
-Example:
-
-> Short and descriptive example bug report title
->
-> A summary of the issue and the environment in which it occurs. If
-> suitable, include the steps required to reproduce the bug.
->
-> 1. This is the first step
-> 2. This is the second step
-> 3. Further steps, etc.
->
-> `` - a link to the reduced test case
->
-> Any other information you want to share that is relevant to the issue being
-> reported. This might include the lines of code that you have identified as
-> causing the bug, and potential solutions (and your opinions on their
-> merits).
-
-
-## Feature requests
-
-Feature requests are welcome. But take a moment to find out whether your idea
-fits with the scope and aims of the project. It's up to *you* to make a strong
-case to convince the project's developers of the merits of this feature. Please
-provide as much detail and context as possible.
-
-
-## Pull requests
-
-Good pull requests - patches, improvements, new features - are a fantastic
-help. They should remain focused in scope and avoid containing unrelated
-commits.
-
-[**Please ask first**](https://ernest.io/community) before embarking on any significant pull request (e.g.
-implementing features, refactoring code, porting to a different language),
-otherwise you risk spending a lot of time working on something that the
-project's developers might not want to merge into the project.
-
-Please adhere to the coding conventions used throughout a project (indentation,
-accurate comments, etc.) and any other requirements (such as test coverage).
diff --git a/vendor/github.com/r3labs/sse/v2/LICENSE b/vendor/github.com/r3labs/sse/v2/LICENSE
deleted file mode 100644
index a612ad9813..0000000000
--- a/vendor/github.com/r3labs/sse/v2/LICENSE
+++ /dev/null
@@ -1,373 +0,0 @@
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/r3labs/sse/v2/Makefile b/vendor/github.com/r3labs/sse/v2/Makefile
deleted file mode 100644
index a63b7001e0..0000000000
--- a/vendor/github.com/r3labs/sse/v2/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-install:
- go install -v
-
-build:
- go build -v ./...
-
-lint:
- golint ./...
- go vet ./...
-
-test:
- go test -v ./... --cover
-
-deps:
- go get -u gopkg.in/cenkalti/backoff.v1
- go get -u github.com/golang/lint/golint
- go get -u github.com/stretchr/testify
-
-clean:
- go clean
diff --git a/vendor/github.com/r3labs/sse/v2/README.md b/vendor/github.com/r3labs/sse/v2/README.md
deleted file mode 100644
index c2201be698..0000000000
--- a/vendor/github.com/r3labs/sse/v2/README.md
+++ /dev/null
@@ -1,191 +0,0 @@
-# SSE - Server Sent Events Client/Server Library for Go
-
-## Synopsis
-
-SSE is a client/server implementation for Server Sent Events for Golang.
-
-## Build status
-
-* Master: [](https://circleci.com/gh/r3labs/sse)
-
-## Quick start
-
-To install:
-```
-go get github.com/r3labs/sse/v2
-```
-
-To Test:
-
-```sh
-$ make deps
-$ make test
-```
-
-#### Example Server
-
-There are two parts of the server. It is comprised of the message scheduler and a http handler function.
-The messaging system is started when running:
-
-```go
-func main() {
- server := sse.New()
-}
-```
-
-To add a stream to this handler:
-
-```go
-func main() {
- server := sse.New()
- server.CreateStream("messages")
-}
-```
-
-This creates a new stream inside of the scheduler. Seeing as there are no consumers, publishing a message to this channel will do nothing.
-Clients can connect to this stream once the http handler is started by specifying _stream_ as a url parameter, like so:
-
-```
-http://server/events?stream=messages
-```
-
-
-In order to start the http server:
-
-```go
-func main() {
- server := sse.New()
-
- // Create a new Mux and set the handler
- mux := http.NewServeMux()
- mux.HandleFunc("/events", server.ServeHTTP)
-
- http.ListenAndServe(":8080", mux)
-}
-```
-
-To publish messages to a stream:
-
-```go
-func main() {
- server := sse.New()
-
- // Publish a payload to the stream
- server.Publish("messages", &sse.Event{
- Data: []byte("ping"),
- })
-}
-```
-
-Please note there must be a stream with the name you specify and there must be subscribers to that stream
-
-A way to detect disconnected clients:
-
-```go
-func main() {
- server := sse.New()
-
- mux := http.NewServeMux()
- mux.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
- go func() {
- // Received Browser Disconnection
- <-r.Context().Done()
- println("The client is disconnected here")
- return
- }()
-
- server.ServeHTTP(w, r)
- })
-
- http.ListenAndServe(":8080", mux)
-}
-```
-
-#### Example Client
-
-The client exposes a way to connect to an SSE server. The client can also handle multiple events under the same url.
-
-To create a new client:
-
-```go
-func main() {
- client := sse.NewClient("http://server/events")
-}
-```
-
-To subscribe to an event stream, please use the Subscribe function. This accepts the name of the stream and a handler function:
-
-```go
-func main() {
- client := sse.NewClient("http://server/events")
-
- client.Subscribe("messages", func(msg *sse.Event) {
- // Got some data!
- fmt.Println(msg.Data)
- })
-}
-```
-
-Please note that this function will block the current thread. You can run this function in a go routine.
-
-If you wish to have events sent to a channel, you can use SubscribeChan:
-
-```go
-func main() {
- events := make(chan *sse.Event)
-
- client := sse.NewClient("http://server/events")
- client.SubscribeChan("messages", events)
-}
-```
-
-#### HTTP client parameters
-
-To add additional parameters to the http client, such as disabling ssl verification for self signed certs, you can override the http client or update its options:
-
-```go
-func main() {
- client := sse.NewClient("http://server/events")
- client.Connection.Transport = &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- }
-}
-```
-
-#### URL query parameters
-
-To set custom query parameters on the client or disable the stream parameter altogether:
-
-```go
-func main() {
- client := sse.NewClient("http://server/events?search=example")
-
- client.SubscribeRaw(func(msg *sse.Event) {
- // Got some data!
- fmt.Println(msg.Data)
- })
-}
-```
-
-
-## Contributing
-
-Please read through our
-[contributing guidelines](CONTRIBUTING.md).
-Included are directions for opening issues, coding standards, and notes on
-development.
-
-Moreover, if your pull request contains patches or features, you must include
-relevant unit tests.
-
-## Versioning
-
-For transparency into our release cycle and in striving to maintain backward
-compatibility, this project is maintained under [the Semantic Versioning guidelines](http://semver.org/).
-
-## Copyright and License
-
-Code and documentation copyright since 2015 r3labs.io authors.
-
-Code released under
-[the Mozilla Public License Version 2.0](LICENSE).
diff --git a/vendor/github.com/r3labs/sse/v2/client.go b/vendor/github.com/r3labs/sse/v2/client.go
deleted file mode 100644
index 61772b624d..0000000000
--- a/vendor/github.com/r3labs/sse/v2/client.go
+++ /dev/null
@@ -1,390 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package sse
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "net/http"
- "sync"
- "sync/atomic"
- "time"
-
- "gopkg.in/cenkalti/backoff.v1"
-)
-
-var (
- headerID = []byte("id:")
- headerData = []byte("data:")
- headerEvent = []byte("event:")
- headerRetry = []byte("retry:")
-)
-
-func ClientMaxBufferSize(s int) func(c *Client) {
- return func(c *Client) {
- c.maxBufferSize = s
- }
-}
-
-// ConnCallback defines a function to be called on a particular connection event
-type ConnCallback func(c *Client)
-
-// ResponseValidator validates a response
-type ResponseValidator func(c *Client, resp *http.Response) error
-
-// Client handles an incoming server stream
-type Client struct {
- Retry time.Time
- ReconnectStrategy backoff.BackOff
- disconnectcb ConnCallback
- connectedcb ConnCallback
- subscribed map[chan *Event]chan struct{}
- Headers map[string]string
- ReconnectNotify backoff.Notify
- ResponseValidator ResponseValidator
- Connection *http.Client
- URL string
- LastEventID atomic.Value // []byte
- maxBufferSize int
- mu sync.Mutex
- EncodingBase64 bool
- Connected bool
-}
-
-// NewClient creates a new client
-func NewClient(url string, opts ...func(c *Client)) *Client {
- c := &Client{
- URL: url,
- Connection: &http.Client{},
- Headers: make(map[string]string),
- subscribed: make(map[chan *Event]chan struct{}),
- maxBufferSize: 1 << 16,
- }
-
- for _, opt := range opts {
- opt(c)
- }
-
- return c
-}
-
-// Subscribe to a data stream
-func (c *Client) Subscribe(stream string, handler func(msg *Event)) error {
- return c.SubscribeWithContext(context.Background(), stream, handler)
-}
-
-// SubscribeWithContext to a data stream with context
-func (c *Client) SubscribeWithContext(ctx context.Context, stream string, handler func(msg *Event)) error {
- operation := func() error {
- resp, err := c.request(ctx, stream)
- if err != nil {
- return err
- }
- if validator := c.ResponseValidator; validator != nil {
- err = validator(c, resp)
- if err != nil {
- return err
- }
- } else if resp.StatusCode != 200 {
- resp.Body.Close()
- return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
- }
- defer resp.Body.Close()
-
- reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
- eventChan, errorChan := c.startReadLoop(reader)
-
- for {
- select {
- case err = <-errorChan:
- return err
- case msg := <-eventChan:
- handler(msg)
- }
- }
- }
-
- // Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method
- var err error
- if c.ReconnectStrategy != nil {
- err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
- } else {
- err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
- }
- return err
-}
-
-// SubscribeChan sends all events to the provided channel
-func (c *Client) SubscribeChan(stream string, ch chan *Event) error {
- return c.SubscribeChanWithContext(context.Background(), stream, ch)
-}
-
-// SubscribeChanWithContext sends all events to the provided channel with context
-func (c *Client) SubscribeChanWithContext(ctx context.Context, stream string, ch chan *Event) error {
- var connected bool
- errch := make(chan error)
- c.mu.Lock()
- c.subscribed[ch] = make(chan struct{})
- c.mu.Unlock()
-
- operation := func() error {
- resp, err := c.request(ctx, stream)
- if err != nil {
- return err
- }
- if validator := c.ResponseValidator; validator != nil {
- err = validator(c, resp)
- if err != nil {
- return err
- }
- } else if resp.StatusCode != 200 {
- resp.Body.Close()
- return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
- }
- defer resp.Body.Close()
-
- if !connected {
- // Notify connect
- errch <- nil
- connected = true
- }
-
- reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
- eventChan, errorChan := c.startReadLoop(reader)
-
- for {
- var msg *Event
- // Wait for message to arrive or exit
- select {
- case <-c.subscribed[ch]:
- return nil
- case err = <-errorChan:
- return err
- case msg = <-eventChan:
- }
-
- // Wait for message to be sent or exit
- if msg != nil {
- select {
- case <-c.subscribed[ch]:
- return nil
- case ch <- msg:
- // message sent
- }
- }
- }
- }
-
- go func() {
- defer c.cleanup(ch)
- // Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method
- var err error
- if c.ReconnectStrategy != nil {
- err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
- } else {
- err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
- }
-
- // channel closed once connected
- if err != nil && !connected {
- errch <- err
- }
- }()
- err := <-errch
- close(errch)
- return err
-}
-
-func (c *Client) startReadLoop(reader *EventStreamReader) (chan *Event, chan error) {
- outCh := make(chan *Event)
- erChan := make(chan error)
- go c.readLoop(reader, outCh, erChan)
- return outCh, erChan
-}
-
-func (c *Client) readLoop(reader *EventStreamReader, outCh chan *Event, erChan chan error) {
- for {
- // Read each new line and process the type of event
- event, err := reader.ReadEvent()
- if err != nil {
- if err == io.EOF {
- erChan <- nil
- return
- }
- // run user specified disconnect function
- if c.disconnectcb != nil {
- c.Connected = false
- c.disconnectcb(c)
- }
- erChan <- err
- return
- }
-
- if !c.Connected && c.connectedcb != nil {
- c.Connected = true
- c.connectedcb(c)
- }
-
- // If we get an error, ignore it.
- var msg *Event
- if msg, err = c.processEvent(event); err == nil {
- if len(msg.ID) > 0 {
- c.LastEventID.Store(msg.ID)
- } else {
- msg.ID, _ = c.LastEventID.Load().([]byte)
- }
-
- // Send downstream if the event has something useful
- if msg.hasContent() {
- outCh <- msg
- }
- }
- }
-}
-
-// SubscribeRaw to an sse endpoint
-func (c *Client) SubscribeRaw(handler func(msg *Event)) error {
- return c.Subscribe("", handler)
-}
-
-// SubscribeRawWithContext to an sse endpoint with context
-func (c *Client) SubscribeRawWithContext(ctx context.Context, handler func(msg *Event)) error {
- return c.SubscribeWithContext(ctx, "", handler)
-}
-
-// SubscribeChanRaw sends all events to the provided channel
-func (c *Client) SubscribeChanRaw(ch chan *Event) error {
- return c.SubscribeChan("", ch)
-}
-
-// SubscribeChanRawWithContext sends all events to the provided channel with context
-func (c *Client) SubscribeChanRawWithContext(ctx context.Context, ch chan *Event) error {
- return c.SubscribeChanWithContext(ctx, "", ch)
-}
-
-// Unsubscribe unsubscribes a channel
-func (c *Client) Unsubscribe(ch chan *Event) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.subscribed[ch] != nil {
- c.subscribed[ch] <- struct{}{}
- }
-}
-
-// OnDisconnect specifies the function to run when the connection disconnects
-func (c *Client) OnDisconnect(fn ConnCallback) {
- c.disconnectcb = fn
-}
-
-// OnConnect specifies the function to run when the connection is successful
-func (c *Client) OnConnect(fn ConnCallback) {
- c.connectedcb = fn
-}
-
-func (c *Client) request(ctx context.Context, stream string) (*http.Response, error) {
- req, err := http.NewRequest("GET", c.URL, nil)
- if err != nil {
- return nil, err
- }
- req = req.WithContext(ctx)
-
- // Setup request, specify stream to connect to
- if stream != "" {
- query := req.URL.Query()
- query.Add("stream", stream)
- req.URL.RawQuery = query.Encode()
- }
-
- req.Header.Set("Cache-Control", "no-cache")
- req.Header.Set("Accept", "text/event-stream")
- req.Header.Set("Connection", "keep-alive")
-
- lastID, exists := c.LastEventID.Load().([]byte)
- if exists && lastID != nil {
- req.Header.Set("Last-Event-ID", string(lastID))
- }
-
- // Add user specified headers
- for k, v := range c.Headers {
- req.Header.Set(k, v)
- }
-
- return c.Connection.Do(req)
-}
-
-func (c *Client) processEvent(msg []byte) (event *Event, err error) {
- var e Event
-
- if len(msg) < 1 {
- return nil, errors.New("event message was empty")
- }
-
- // Normalize the crlf to lf to make it easier to split the lines.
- // Split the line by "\n" or "\r", per the spec.
- for _, line := range bytes.FieldsFunc(msg, func(r rune) bool { return r == '\n' || r == '\r' }) {
- switch {
- case bytes.HasPrefix(line, headerID):
- e.ID = append([]byte(nil), trimHeader(len(headerID), line)...)
- case bytes.HasPrefix(line, headerData):
- // The spec allows for multiple data fields per event, concatenated them with "\n".
- e.Data = append(e.Data[:], append(trimHeader(len(headerData), line), byte('\n'))...)
- // The spec says that a line that simply contains the string "data" should be treated as a data field with an empty body.
- case bytes.Equal(line, bytes.TrimSuffix(headerData, []byte(":"))):
- e.Data = append(e.Data, byte('\n'))
- case bytes.HasPrefix(line, headerEvent):
- e.Event = append([]byte(nil), trimHeader(len(headerEvent), line)...)
- case bytes.HasPrefix(line, headerRetry):
- e.Retry = append([]byte(nil), trimHeader(len(headerRetry), line)...)
- default:
- // Ignore any garbage that doesn't match what we're looking for.
- }
- }
-
- // Trim the last "\n" per the spec.
- e.Data = bytes.TrimSuffix(e.Data, []byte("\n"))
-
- if c.EncodingBase64 {
- buf := make([]byte, base64.StdEncoding.DecodedLen(len(e.Data)))
-
- n, err := base64.StdEncoding.Decode(buf, e.Data)
- if err != nil {
- err = fmt.Errorf("failed to decode event message: %s", err)
- }
- e.Data = buf[:n]
- }
- return &e, err
-}
-
-func (c *Client) cleanup(ch chan *Event) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.subscribed[ch] != nil {
- close(c.subscribed[ch])
- delete(c.subscribed, ch)
- }
-}
-
-func trimHeader(size int, data []byte) []byte {
- if data == nil || len(data) < size {
- return data
- }
-
- data = data[size:]
- // Remove optional leading whitespace
- if len(data) > 0 && data[0] == 32 {
- data = data[1:]
- }
- // Remove trailing new line
- if len(data) > 0 && data[len(data)-1] == 10 {
- data = data[:len(data)-1]
- }
- return data
-}
diff --git a/vendor/github.com/r3labs/sse/v2/event.go b/vendor/github.com/r3labs/sse/v2/event.go
deleted file mode 100644
index 1258038786..0000000000
--- a/vendor/github.com/r3labs/sse/v2/event.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package sse
-
-import (
- "bufio"
- "bytes"
- "context"
- "io"
- "time"
-)
-
-// Event holds all of the event source fields
-type Event struct {
- timestamp time.Time
- ID []byte
- Data []byte
- Event []byte
- Retry []byte
- Comment []byte
-}
-
-func (e *Event) hasContent() bool {
- return len(e.ID) > 0 || len(e.Data) > 0 || len(e.Event) > 0 || len(e.Retry) > 0
-}
-
-// EventStreamReader scans an io.Reader looking for EventStream messages.
-type EventStreamReader struct {
- scanner *bufio.Scanner
-}
-
-// NewEventStreamReader creates an instance of EventStreamReader.
-func NewEventStreamReader(eventStream io.Reader, maxBufferSize int) *EventStreamReader {
- scanner := bufio.NewScanner(eventStream)
- initBufferSize := minPosInt(4096, maxBufferSize)
- scanner.Buffer(make([]byte, initBufferSize), maxBufferSize)
-
- split := func(data []byte, atEOF bool) (int, []byte, error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
-
- // We have a full event payload to parse.
- if i, nlen := containsDoubleNewline(data); i >= 0 {
- return i + nlen, data[0:i], nil
- }
- // If we're at EOF, we have all of the data.
- if atEOF {
- return len(data), data, nil
- }
- // Request more data.
- return 0, nil, nil
- }
- // Set the split function for the scanning operation.
- scanner.Split(split)
-
- return &EventStreamReader{
- scanner: scanner,
- }
-}
-
-// Returns a tuple containing the index of a double newline, and the number of bytes
-// represented by that sequence. If no double newline is present, the first value
-// will be negative.
-func containsDoubleNewline(data []byte) (int, int) {
- // Search for each potentially valid sequence of newline characters
- crcr := bytes.Index(data, []byte("\r\r"))
- lflf := bytes.Index(data, []byte("\n\n"))
- crlflf := bytes.Index(data, []byte("\r\n\n"))
- lfcrlf := bytes.Index(data, []byte("\n\r\n"))
- crlfcrlf := bytes.Index(data, []byte("\r\n\r\n"))
- // Find the earliest position of a double newline combination
- minPos := minPosInt(crcr, minPosInt(lflf, minPosInt(crlflf, minPosInt(lfcrlf, crlfcrlf))))
- // Detemine the length of the sequence
- nlen := 2
- if minPos == crlfcrlf {
- nlen = 4
- } else if minPos == crlflf || minPos == lfcrlf {
- nlen = 3
- }
- return minPos, nlen
-}
-
-// Returns the minimum non-negative value out of the two values. If both
-// are negative, a negative value is returned.
-func minPosInt(a, b int) int {
- if a < 0 {
- return b
- }
- if b < 0 {
- return a
- }
- if a > b {
- return b
- }
- return a
-}
-
-// ReadEvent scans the EventStream for events.
-func (e *EventStreamReader) ReadEvent() ([]byte, error) {
- if e.scanner.Scan() {
- event := e.scanner.Bytes()
- return event, nil
- }
- if err := e.scanner.Err(); err != nil {
- if err == context.Canceled {
- return nil, io.EOF
- }
- return nil, err
- }
- return nil, io.EOF
-}
diff --git a/vendor/github.com/r3labs/sse/v2/event_log.go b/vendor/github.com/r3labs/sse/v2/event_log.go
deleted file mode 100644
index aa17dad058..0000000000
--- a/vendor/github.com/r3labs/sse/v2/event_log.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package sse
-
-import (
- "strconv"
- "time"
-)
-
-// EventLog holds all of previous events
-type EventLog []*Event
-
-// Add event to eventlog
-func (e *EventLog) Add(ev *Event) {
- if !ev.hasContent() {
- return
- }
-
- ev.ID = []byte(e.currentindex())
- ev.timestamp = time.Now()
- *e = append(*e, ev)
-}
-
-// Clear events from eventlog
-func (e *EventLog) Clear() {
- *e = nil
-}
-
-// Replay events to a subscriber
-func (e *EventLog) Replay(s *Subscriber) {
- for i := 0; i < len(*e); i++ {
- id, _ := strconv.Atoi(string((*e)[i].ID))
- if id >= s.eventid {
- s.connection <- (*e)[i]
- }
- }
-}
-
-func (e *EventLog) currentindex() string {
- return strconv.Itoa(len(*e))
-}
diff --git a/vendor/github.com/r3labs/sse/v2/http.go b/vendor/github.com/r3labs/sse/v2/http.go
deleted file mode 100644
index c7a2b434a9..0000000000
--- a/vendor/github.com/r3labs/sse/v2/http.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package sse
-
-import (
- "bytes"
- "fmt"
- "net/http"
- "strconv"
- "time"
-)
-
-// ServeHTTP serves new connections with events for a given stream ...
-func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- flusher, err := w.(http.Flusher)
- if !err {
- http.Error(w, "Streaming unsupported!", http.StatusInternalServerError)
- return
- }
-
- w.Header().Set("Content-Type", "text/event-stream")
- w.Header().Set("Cache-Control", "no-cache")
- w.Header().Set("Connection", "keep-alive")
-
- for k, v := range s.Headers {
- w.Header().Set(k, v)
- }
-
- // Get the StreamID from the URL
- streamID := r.URL.Query().Get("stream")
- if streamID == "" {
- http.Error(w, "Please specify a stream!", http.StatusInternalServerError)
- return
- }
-
- stream := s.getStream(streamID)
-
- if stream == nil {
- if !s.AutoStream {
- http.Error(w, "Stream not found!", http.StatusInternalServerError)
- return
- }
-
- stream = s.CreateStream(streamID)
- }
-
- eventid := 0
- if id := r.Header.Get("Last-Event-ID"); id != "" {
- var err error
- eventid, err = strconv.Atoi(id)
- if err != nil {
- http.Error(w, "Last-Event-ID must be a number!", http.StatusBadRequest)
- return
- }
- }
-
- // Create the stream subscriber
- sub := stream.addSubscriber(eventid, r.URL)
-
- go func() {
- <-r.Context().Done()
-
- sub.close()
-
- if s.AutoStream && !s.AutoReplay && stream.getSubscriberCount() == 0 {
- s.RemoveStream(streamID)
- }
- }()
-
- w.WriteHeader(http.StatusOK)
- flusher.Flush()
-
- // Push events to client
- for ev := range sub.connection {
- // If the data buffer is an empty string abort.
- if len(ev.Data) == 0 && len(ev.Comment) == 0 {
- break
- }
-
- // if the event has expired, dont send it
- if s.EventTTL != 0 && time.Now().After(ev.timestamp.Add(s.EventTTL)) {
- continue
- }
-
- if len(ev.Data) > 0 {
- fmt.Fprintf(w, "id: %s\n", ev.ID)
-
- if s.SplitData {
- sd := bytes.Split(ev.Data, []byte("\n"))
- for i := range sd {
- fmt.Fprintf(w, "data: %s\n", sd[i])
- }
- } else {
- if bytes.HasPrefix(ev.Data, []byte(":")) {
- fmt.Fprintf(w, "%s\n", ev.Data)
- } else {
- fmt.Fprintf(w, "data: %s\n", ev.Data)
- }
- }
-
- if len(ev.Event) > 0 {
- fmt.Fprintf(w, "event: %s\n", ev.Event)
- }
-
- if len(ev.Retry) > 0 {
- fmt.Fprintf(w, "retry: %s\n", ev.Retry)
- }
- }
-
- if len(ev.Comment) > 0 {
- fmt.Fprintf(w, ": %s\n", ev.Comment)
- }
-
- fmt.Fprint(w, "\n")
-
- flusher.Flush()
- }
-}
diff --git a/vendor/github.com/r3labs/sse/v2/server.go b/vendor/github.com/r3labs/sse/v2/server.go
deleted file mode 100644
index d1b27af325..0000000000
--- a/vendor/github.com/r3labs/sse/v2/server.go
+++ /dev/null
@@ -1,156 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package sse
-
-import (
- "encoding/base64"
- "sync"
- "time"
-)
-
-// DefaultBufferSize size of the queue that holds the streams messages.
-const DefaultBufferSize = 1024
-
-// Server Is our main struct
-type Server struct {
- // Extra headers adding to the HTTP response to each client
- Headers map[string]string
- // Sets a ttl that prevents old events from being transmitted
- EventTTL time.Duration
- // Specifies the size of the message buffer for each stream
- BufferSize int
- // Encodes all data as base64
- EncodeBase64 bool
- // Splits an events data into multiple data: entries
- SplitData bool
- // Enables creation of a stream when a client connects
- AutoStream bool
- // Enables automatic replay for each new subscriber that connects
- AutoReplay bool
-
- // Specifies the function to run when client subscribe or un-subscribe
- OnSubscribe func(streamID string, sub *Subscriber)
- OnUnsubscribe func(streamID string, sub *Subscriber)
-
- streams map[string]*Stream
- muStreams sync.RWMutex
-}
-
-// New will create a server and setup defaults
-func New() *Server {
- return &Server{
- BufferSize: DefaultBufferSize,
- AutoStream: false,
- AutoReplay: true,
- streams: make(map[string]*Stream),
- Headers: map[string]string{},
- }
-}
-
-// NewWithCallback will create a server and setup defaults with callback function
-func NewWithCallback(onSubscribe, onUnsubscribe func(streamID string, sub *Subscriber)) *Server {
- return &Server{
- BufferSize: DefaultBufferSize,
- AutoStream: false,
- AutoReplay: true,
- streams: make(map[string]*Stream),
- Headers: map[string]string{},
- OnSubscribe: onSubscribe,
- OnUnsubscribe: onUnsubscribe,
- }
-}
-
-// Close shuts down the server, closes all of the streams and connections
-func (s *Server) Close() {
- s.muStreams.Lock()
- defer s.muStreams.Unlock()
-
- for id := range s.streams {
- s.streams[id].close()
- delete(s.streams, id)
- }
-}
-
-// CreateStream will create a new stream and register it
-func (s *Server) CreateStream(id string) *Stream {
- s.muStreams.Lock()
- defer s.muStreams.Unlock()
-
- if s.streams[id] != nil {
- return s.streams[id]
- }
-
- str := newStream(id, s.BufferSize, s.AutoReplay, s.AutoStream, s.OnSubscribe, s.OnUnsubscribe)
- str.run()
-
- s.streams[id] = str
-
- return str
-}
-
-// RemoveStream will remove a stream
-func (s *Server) RemoveStream(id string) {
- s.muStreams.Lock()
- defer s.muStreams.Unlock()
-
- if s.streams[id] != nil {
- s.streams[id].close()
- delete(s.streams, id)
- }
-}
-
-// StreamExists checks whether a stream by a given id exists
-func (s *Server) StreamExists(id string) bool {
- return s.getStream(id) != nil
-}
-
-// Publish sends a mesage to every client in a streamID.
-// If the stream's buffer is full, it blocks until the message is sent out to
-// all subscribers (but not necessarily arrived the clients), or when the
-// stream is closed.
-func (s *Server) Publish(id string, event *Event) {
- stream := s.getStream(id)
- if stream == nil {
- return
- }
-
- select {
- case <-stream.quit:
- case stream.event <- s.process(event):
- }
-}
-
-// TryPublish is the same as Publish except that when the operation would cause
-// the call to be blocked, it simply drops the message and returns false.
-// Together with a small BufferSize, it can be useful when publishing the
-// latest message ASAP is more important than reliable delivery.
-func (s *Server) TryPublish(id string, event *Event) bool {
- stream := s.getStream(id)
- if stream == nil {
- return false
- }
-
- select {
- case stream.event <- s.process(event):
- return true
- default:
- return false
- }
-}
-
-func (s *Server) getStream(id string) *Stream {
- s.muStreams.RLock()
- defer s.muStreams.RUnlock()
- return s.streams[id]
-}
-
-func (s *Server) process(event *Event) *Event {
- if s.EncodeBase64 {
- output := make([]byte, base64.StdEncoding.EncodedLen(len(event.Data)))
- base64.StdEncoding.Encode(output, event.Data)
- event.Data = output
- }
- return event
-}
diff --git a/vendor/github.com/r3labs/sse/v2/stream.go b/vendor/github.com/r3labs/sse/v2/stream.go
deleted file mode 100644
index bfbcb9b523..0000000000
--- a/vendor/github.com/r3labs/sse/v2/stream.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package sse
-
-import (
- "net/url"
- "sync"
- "sync/atomic"
-)
-
-// Stream ...
-type Stream struct {
- ID string
- event chan *Event
- quit chan struct{}
- quitOnce sync.Once
- register chan *Subscriber
- deregister chan *Subscriber
- subscribers []*Subscriber
- Eventlog EventLog
- subscriberCount int32
- // Enables replaying of eventlog to newly added subscribers
- AutoReplay bool
- isAutoStream bool
-
- // Specifies the function to run when client subscribe or un-subscribe
- OnSubscribe func(streamID string, sub *Subscriber)
- OnUnsubscribe func(streamID string, sub *Subscriber)
-}
-
-// newStream returns a new stream
-func newStream(id string, buffSize int, replay, isAutoStream bool, onSubscribe, onUnsubscribe func(string, *Subscriber)) *Stream {
- return &Stream{
- ID: id,
- AutoReplay: replay,
- subscribers: make([]*Subscriber, 0),
- isAutoStream: isAutoStream,
- register: make(chan *Subscriber),
- deregister: make(chan *Subscriber),
- event: make(chan *Event, buffSize),
- quit: make(chan struct{}),
- Eventlog: make(EventLog, 0),
- OnSubscribe: onSubscribe,
- OnUnsubscribe: onUnsubscribe,
- }
-}
-
-func (str *Stream) run() {
- go func(str *Stream) {
- for {
- select {
- // Add new subscriber
- case subscriber := <-str.register:
- str.subscribers = append(str.subscribers, subscriber)
- if str.AutoReplay {
- str.Eventlog.Replay(subscriber)
- }
-
- // Remove closed subscriber
- case subscriber := <-str.deregister:
- i := str.getSubIndex(subscriber)
- if i != -1 {
- str.removeSubscriber(i)
- }
-
- if str.OnUnsubscribe != nil {
- go str.OnUnsubscribe(str.ID, subscriber)
- }
-
- // Publish event to subscribers
- case event := <-str.event:
- if str.AutoReplay {
- str.Eventlog.Add(event)
- }
- for i := range str.subscribers {
- str.subscribers[i].connection <- event
- }
-
- // Shutdown if the server closes
- case <-str.quit:
- // remove connections
- str.removeAllSubscribers()
- return
- }
- }
- }(str)
-}
-
-func (str *Stream) close() {
- str.quitOnce.Do(func() {
- close(str.quit)
- })
-}
-
-func (str *Stream) getSubIndex(sub *Subscriber) int {
- for i := range str.subscribers {
- if str.subscribers[i] == sub {
- return i
- }
- }
- return -1
-}
-
-// addSubscriber will create a new subscriber on a stream
-func (str *Stream) addSubscriber(eventid int, url *url.URL) *Subscriber {
- atomic.AddInt32(&str.subscriberCount, 1)
- sub := &Subscriber{
- eventid: eventid,
- quit: str.deregister,
- connection: make(chan *Event, 64),
- URL: url,
- }
-
- if str.isAutoStream {
- sub.removed = make(chan struct{}, 1)
- }
-
- str.register <- sub
-
- if str.OnSubscribe != nil {
- go str.OnSubscribe(str.ID, sub)
- }
-
- return sub
-}
-
-func (str *Stream) removeSubscriber(i int) {
- atomic.AddInt32(&str.subscriberCount, -1)
- close(str.subscribers[i].connection)
- if str.subscribers[i].removed != nil {
- str.subscribers[i].removed <- struct{}{}
- close(str.subscribers[i].removed)
- }
- str.subscribers = append(str.subscribers[:i], str.subscribers[i+1:]...)
-}
-
-func (str *Stream) removeAllSubscribers() {
- for i := 0; i < len(str.subscribers); i++ {
- close(str.subscribers[i].connection)
- if str.subscribers[i].removed != nil {
- str.subscribers[i].removed <- struct{}{}
- close(str.subscribers[i].removed)
- }
- }
- atomic.StoreInt32(&str.subscriberCount, 0)
- str.subscribers = str.subscribers[:0]
-}
-
-func (str *Stream) getSubscriberCount() int {
- return int(atomic.LoadInt32(&str.subscriberCount))
-}
diff --git a/vendor/github.com/r3labs/sse/v2/subscriber.go b/vendor/github.com/r3labs/sse/v2/subscriber.go
deleted file mode 100644
index 4b54c204f3..0000000000
--- a/vendor/github.com/r3labs/sse/v2/subscriber.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-package sse
-
-import "net/url"
-
-// Subscriber ...
-type Subscriber struct {
- quit chan *Subscriber
- connection chan *Event
- removed chan struct{}
- eventid int
- URL *url.URL
-}
-
-// Close will let the stream know that the clients connection has terminated
-func (s *Subscriber) close() {
- s.quit <- s
- if s.removed != nil {
- <-s.removed
- }
-}
diff --git a/vendor/github.com/tmaxmax/go-sse/.gitignore b/vendor/github.com/tmaxmax/go-sse/.gitignore
new file mode 100644
index 0000000000..485dee64bc
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/.gitignore
@@ -0,0 +1 @@
+.idea
diff --git a/vendor/github.com/tmaxmax/go-sse/.golangci.yml b/vendor/github.com/tmaxmax/go-sse/.golangci.yml
new file mode 100644
index 0000000000..2ce58bbd38
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/.golangci.yml
@@ -0,0 +1,58 @@
+linters:
+ enable:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - typecheck
+ - dogsled
+ - dupl
+ - errorlint
+ - exhaustive
+ - nestif
+ - goconst
+ - gocritic
+ - gocyclo
+ - godot
+ - godox
+ - gofmt
+ - gofumpt
+ - goheader
+ - goimports
+ - gomoddirectives
+ - gomodguard
+ - gosec
+ - importas
+ - makezero
+ - misspell
+ - prealloc
+ - promlinter
+ - predeclared
+ - nolintlint
+ - revive
+ - stylecheck
+ - tagliatelle
+ - thelper
+ - unparam
+ - unused
+ - whitespace
+linters-settings:
+ gosec:
+ excludes:
+ - G404
+ gocritic:
+ disabled-checks:
+ - ifElseChain
+ - unnamedResult
+ - hugeParam
+ enabled-tags:
+ - performance
+ - diagnostic
+ - experimental
+ - opinionated
+ nestif:
+ min-complexity: 8
+ govet:
+ enable:
+ - fieldalignment
diff --git a/vendor/github.com/tmaxmax/go-sse/CHANGELOG.md b/vendor/github.com/tmaxmax/go-sse/CHANGELOG.md
new file mode 100644
index 0000000000..2880426f19
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/CHANGELOG.md
@@ -0,0 +1,323 @@
+# Changelog
+
+This file tracks changes to this project. It follows the [Keep a Changelog format](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.11.0] - 2025-05-14
+
+The `sse.Server` logging and session handling were revamped to have more familiar, more flexible and less error prone interfaces for users.
+
+### Removed
+
+- `Logger` and `LogLevel` enum have been removed. `Server.Logger` has transitioned to the standard `slog` library for better compatibility with the ecosystem
+
+### Changed
+
+- `Server.Logger` is now of type `func(r *http.Request) *slog.Logger` instead of `sse.Logger` – it is possible to customize the logger on a per-request basis, by for example retrieving it from the context.
+- `Server.OnSession` signature changed from `func(s *Session) (Subscription, bool)` to `func(w http.ResponseWriter, r *http.Request) (topics []string, accepted bool)` – its initial role was to essentially just provide the topics, so the need to fiddle with `Session` and `Subscription` was redundant anyway
+- `Joe.Subscribe` now always returns `ErrProviderClosed` when a `Joe` instance is closed while subscriptions are active. Previously it would return it only if `Joe` was already shut down before subscribing.
+- `Joe` will print a stack trace for `Replayer` panics.
+
+### Fixed
+
+- `sse.Session` doesn't write the header explicitly anymore. This would cause a `http: superfluous response.WriteHeader call` warning being logged when `sse.Server.OnSession` writes a response code itself when accepting a session. The change was initially introduced to remove the warning for users of certain external libraries (see #41) but this is the issue of the external library, not of `go-sse`. If you encounter this warning when using an external library, write the response code yourself in the HTTP handler before subscribing the `sse.Session`, as described in the linked discussion.
+- An insidious synchronization issue in `Joe` causing a channel double close in an edge case scenario (see #50, see code for details)
+
+## [0.10.0] - 2024-12-29
+
+If you're working with LLMs in Go this update will make you happy! `sse.Read` is now a thing – it just parses all events from an `io.Reader`. Use it with your response bodies and forget about any `sse.Client` configuration. It also makes use of the new Go 1.23 iterators to keep your code neat and tidy.
+
+### Added
+
+- `Read` and `ReadConfig`
+
+## [0.9.0] - 2024-12-26
+
+This is the replayer update. Oh, what is a "replayer"? It's how we call replay providers starting with this version! Anyway, besides renaming, this update removes many replaying bugs, improves performance, robustness and error handling and better defines expected behavior for `ReplayProviders`... err, `Replayers`.
+
+More such overhauls are planned. I'm leaving it up to you to guess which comes next – the server or the client? ;)
+
+### Removed
+
+- `FiniteReplayer.{Count, AutoIDs}` – use the constructor instead.
+- `ValidReplayer.{TTL, AutoIDs}` – use the constructor instead.
+
+### Changed
+
+- The `ReplayProvider` and related entities are renamed to just `Replayer`. `go-sse` strives to have a minimal and expressive API, and minimal and expressive names are an important step in that direction. The changelog will use the new names onwards.
+- Due to a change in the internal implementation, the `FiniteReplayer` is now able to replay events only if the event with the LastEventID provided by the client is still buffered. Previously if the LastEventID was that of the latest removed event, events would still be replayed. This detail added complexity to the implementation without an apparent significant win, so it was dropped.
+- `FiniteReplayer.GCInterval` should be set to `0` now in order to disable GC.
+- Automatic ID generation for both replayers does not overwrite already existing message IDs and errors instead. Ensure that your events do not have IDs when using replayers configured to generate IDs.
+- `Replayer.Put` now returns an error instead of being required to panic. Read the method documentation for more info. `Joe` also propagates this error through `Joe.Publish`.
+- Replayers are now required to not overwrite message IDs and return errors instead. Sending unsupported messages to replayers is a bug which should not go unnoticed. Both replayers in this library now implement this behavior.
+- `Joe` does not log replayer panics to the console anymore. Handle these panics inside the replay provider itself.
+
+### Added
+
+- `NewFiniteReplayer` constructor
+- `NewValidReplayer` constructor
+- `Connection.Buffer`
+
+### Fixed
+
+- `FiniteReplayer` doesn't leak memory anymore and respects the stored messages count it was given. Previously when a new message was put after the messages count was reached and some other messages were removed, the total messages count would grow unexpectedly and `FiniteReplayer` would store and replay more events than it was configured to.
+- `ValidReplayer` was also susceptible to a similar memory leak, which is also fixed now.
+- #41 – `sse.Session` now writes the header explicitly when upgrading.
+
+## [0.8.0] - 2024-01-30
+
+This version removes all external dependencies of `go-sse`. All our bugs are belong to us! It also does some API and documentation cleanups.
+
+### Removed
+
+- `Client.DefaultReconnectionTime`, `Client.MaxRetries` have been replaced with the new `Client.Backoff` configuration field. See the Added section for more info.
+- `ErrReplayFailed` is removed from the public API.
+- `ReplayProviderWithGC` and `Joe.ReplayGCInterval` are no more. The responsibility for garbage collection is assigned to the replay providers.
+
+### Changed
+
+- `Server.Logger` is now of a new type: the `Logger` interface. The dependency on x/exp/slog is removed. This opens up the possibility to adapt any existing logger to be usable with `Server`.
+- The default backoff behavior has changed. The _previous_ defaults map to the new `Backoff` configuration as follows:
+```go
+sse.Backoff{
+ InitialInterval: 5 * time.Second, // currently 500ms
+ Multiplier: 1.5, // currently the same
+ Jitter: 0.5, // currently the same
+ MaxInterval: 60 * time.Second, // currently unbounded
+ MaxElapsedDuration: 15 * time.Minute, // currently unbounded
+ MaxRetries: -1, // previously no retries by default, currently unbounded
+}
+```
+- `Joe` now accepts new subscriptions even if replay providers panic (previously `ErrReplayFailed` would be returned).
+- `Server.ServeHTTP` panics if a custom `OnSession` handler returns a `Subscription` with 0 topics
+
+### Added
+
+- The `Logger` interface, `LogLevel` type, and `LogLevel(Info|Warn|Error)` values.
+- `Backoff` and `Client.Backoff` – the backoff strategy is now fully configurable. See the code documentation for info.
+- `ValidReplayProvider.GCInterval`, to configure at which interval expired events should be cleaned up.
+
+## [0.7.0] - 2023-11-19
+
+This version overhauls connection retry and fixes the connection event dispatch order issue. Some internal changes to Joe were also made, which makes it faster and more resilient.
+
+### Removed
+
+- `ConnectionError.Temporary`
+- `ConnectionError.Timeout`
+
+### Changed
+
+- Go's `Timeout` and `Temporary` interfaces are not used anymore – the client makes no assumptions and retries on every network or response read error. The only cases when `Connection.Connect` returns now are either when there are no more retries left (when the number is not infinite), or when the request context was cancelled.
+- `*url.Error`s that occur on the HTTP request are now unwrapped and their cause is put inside a `ConnectionError`.
+- `Connection.Connect` doesn't suppress any errors anymore: the request context errors are returned as is, all other errors are wrapped inside `ConnectionError`.
+- On reconnection attempt, the response reset error is now wrapped inside `ConnectionError`. With this change, all errors other than the context errors are wrapped inside `ConnectionError`.
+- Subscription callbacks are no longer called in individual goroutines. This caused messages to be received in an indeterminate order. Make sure that your callbacks do not block for too long!
+
+### Changed
+
+- If a `ReplayProvider` method panics when called by `Joe`, instead of closing itself completely it just stops replaying, putting or GC-ing messages to upcoming clients. `Joe` continues to function as if no replay provider was given. A stack trace is printed to stderr when such a panic occurs.
+
+## [0.6.0] - 2023-07-22
+
+This version brings a number of refactors to the server-side tooling the library offers. Constructors and construction related types are removed, for ease of use and reduced API size, concerns regarding topics and expiry were separated from `Message`, logging of the `Server` is upgraded to structured logging and messages can be now published to multiple topics at once. Request upgrading has also been refactored to provide a more functional API, and the `Server` logic can now be customized without having to create a distinct handler.
+
+### Removed
+
+- `Message.ExpiresAt` is no more.
+- `Message.Topic` is no more. See the changes to `Server`, `Provider` and `ReplayProvider` for handling topics – you can now publish a message to multiple topics at once.
+- `Message.Writer` is no more. The API was redundant – one can achieve the same using `strings.Builder` and `Message.AppendData`. See the `MessageWriter` example for more.
+- `NewValidReplayProvider` is no more.
+- `NewFiniteReplayProvider` is no more.
+- `NewJoe` is no more.
+- `JoeConfig` is no more.
+- `Server.Subscribe` is no more – it never made sense.
+- `Server.Provider` is no more.
+- `NewServer`, `ServerOption` and friends are no more.
+- The `Logger` interface and the capability of the `Server` to use types that implement `Logger` as logging systems is removed.
+- `SubscriptionCallback` is no more (see the change to the `Subscription` type in the "Changed" section).
+
+### Added
+
+- Because the `ValidReplayProvider` constructor was removed, the fields `ValidReplayProvider.{TTL,AutoIDs}` were added for configuration.
+- Because the `FiniteReplayProvider` constructor was removed, the fields `FiniteReplayProvider.{Count,AutoIDs}` were added for configuration.
+- Because the `Joe` constructor was removed, the fields `Joe.{ReplayProvider,ReplayGCInterval}` were added for configuration.
+- Because the `Server` constructor was removed, the field `Server.Provider` was added for configuration.
+- New `MessageWriter` interface; used by providers to send messages and implemented by `Session` (previously named `Request`).
+- New `ResponseWriter` interface, which is a `http.ResponseWriter` augmented with a `Flush` method.
+- `ValidReplayProvider` has a new field `Now` which allows providing a custom current time getter, like `time.Now`, to the provider. Enables deterministic testing of dependents on `ValidReplayProvider`.
+- New `Server.OnSession` field, which enables customization of `Server`'s response and subscriptions.
+- New `Server.Logger` field, which enables structured logging with logger retrieved from the request and customizable config of logged information.
+
+### Changed
+
+- `ReplayProvider.Put` takes a simple `*Message` and returns a `*Message`, instead of changing the `*Message` to which the `**Message` parameter points.
+ It also takes a slice of topics, given that the `Message` doesn't hold the topic itself anymore. If the Message cannot be put, the method must now panic – see documentation for info.
+- Because `Message.ExpiresAt` is removed, the `ValidReplayProvider` sets the expiry itself.
+- `Server.Publish` now takes a list of topics.
+- `Provider.Publish` now takes a non-empty slice of topics.
+- `ReplayProvider.Put` now takes a non-empty slice of topics.
+- `Provider.Stop` is now `Provider.Shutdown` and takes now a `context.Context` as a parameter.
+- `Server.Shutdown` takes now a `context.Context` as a parameter.
+- `Request` is now named `Session` and exposes the HTTP request, response writer, and the last event ID of the request.
+- A new method `Flush` is added to `Session`; messages are no longer flushed by default, which allows providers, replay providers to batch send messages.
+- `Upgrade` now takes an `*http.Request` as its second parameter.
+- `Subscription` now has a `Client` field of type `MessageWriter` instead of a `Callback`.
+- Given the `Subscription` change, `Provider.Subscribe` and `ReplayProvider.Replay` now report message sending errors.
+
+
+## [0.5.2] - 2023-07-12
+
+### Added
+
+- The new `Message.Writer` – write to the `Message` as if it is an `io.Writer`.
+
+### Fixed
+
+- `Message.UnmarshalText` now strips the leading Unicode BOM, if it exists, as per the specification.
+- When parsing events client-side, BOM removal was attempted on each event input. Now the BOM is correctly removed only when parsing is started.
+
+## [0.5.1] - 2023-07-12
+
+### Fixed
+
+- `Message.WriteTo` now writes nothing if `Message` is empty.
+- `Message.WriteTo` does not attempt to write the `retry` field if `Message.Retry` is not at least 1ms.
+- `NewType` error message is updated to say "event type", not "event name".
+
+## [0.5.0] - 2023-07-11
+
+This version comes with a series of internal refactorings that improve code readability and performance. It also replaces usage of `[]byte` for event data with `string` – SSE is a UTF-8 encoded text-based protocol, so raw bytes never made sense. This migration improves code safety (less `unsafe` usage and less worry about ownership) and reduces the memory footprint of some objects.
+
+Creating events on the server is also revised – fields that required getters and setters, apart from `data` and comments, are now simple public fields on the `sse.Message` struct.
+
+Across the codebase, to refer to the value of the `event` field the name "event type" is used, which is the nomenclature used in the SSE specification.
+
+Documentation and examples were also fixed and improved.
+
+### Added
+
+- New `sse.EventName` type, which holds valid values for the `event` field, together with constructors (`sse.Name` and `sse.NewName`).
+
+### Removed
+
+- `sse.Message`: `AppendText` was removed, as part of the migration from byte slices to strings. SSE is a UTF-8 encoded text-based protocol – raw bytes never made sense.
+
+### Changed
+
+- Minimum supported Go version was bumped from 1.16 to 1.19. From now on, the latest two major Go versions will be supported.
+- `sse.Message`: `AppendData` takes `string`s instead of `[]byte`.
+- `sse.Message`: `Comment` is now named `AppendComment`, for consistency with `AppendData`.
+- `sse.Message`: The message's expiration is not reset anymore by `UnmarshalText`.
+- `sse.Message`: `UnmarshalText` now unmarshals comments as well.
+- `sse.Message`: `WriteTo` (and `MarshalText` and `String` as a result) replaces all newline sequences in data with LF.
+- `sse.Message`: The `Expiry` getter and `SetExpiresAt`, `SetTTL` setters are replaced by the public field `ExpiresAt`.
+- `sse.Message`: Event ID getter and setter are replaced by the public `ID` field.
+- `sse.Message`: Event type (previously named `Name`) getter and setter are replaced by the public `Type` field.
+- `sse.Message`: The `retry` field value is now a public field on the struct. As a byproduct, `WriteTo` will now make 1 allocation when writing events with the `retry` field set.
+- `sse.NewEventID` is now `sse.NewID`, and `sse.MustEventID` is `sse.ID`.
+- `sse.Event`: The `Data` field is now of type `string`, not `[]byte`.
+- `sse.Event`: The `Name` field is now named `Type`.
+
+### Fixed
+
+- `sse.Message`: `Clone` now copies the topic of the message to the new value.
+- `sse.Message`: ID fields that contain NUL characters are now ignored, as required by the spec, in `UnmarshalText`.
+
+## [0.4.3] - 2023-07-08
+
+### Fixed
+
+- Messages longer than 4096 bytes are no longer being dropped ([#2], thanks [@aldld])
+- Event parsing no longer panics on empty field with colon after name, see [test case](https://github.com/tmaxmax/go-sse/blob/4938f99db3bf7a8f057cb3e21ca88df57db3c0e0/internal/parser/field_parser_test.go#L37-L45) for example ([#5])
+
+## [0.4.2] - 2021-10-17
+
+### Added
+
+- Get the event name of a Message
+
+## [0.4.1] - 2021-10-15
+
+### Added
+
+- Set a custom logger for Server
+
+## [0.4.0] - 2021-10-15
+
+### Changed
+
+- Server does not set any other headers besides `Content-Type`.
+- UpgradedRequest does not return a SendError anymore when Write errors.
+- Providers don't handle callback errors anymore. Callbacks return a flag that indicates whether the provider should keep calling it for new messages instead.
+
+### Fixed
+
+- Client's default response validator now ignores `Content-Type` parameters when checking if the response's content type is `text/event-stream`.
+- Various optimizations
+
+## [0.3.0] - 2021-09-18
+
+### Added
+
+- ReplayProviderWithGC interface, which must be satisfied by replay providers that must be cleaned up periodically.
+
+### Changed
+
+- Subscriptions now take a callback function instead of a channel.
+- Server response headers are now sent on the first Send call, not when Upgrade is called.
+- Providers are not required to add the default topic anymore. Callers of Subscribe should ensure at least a topic is specified.
+- Providers' Subscribe method now blocks until the subscriber is removed.
+- Server's Subscribe method automatically adds the default topic if no topic is specified.
+- ReplayProvider does not require for GC to be implemented.
+- Client connections take callback functions instead of channels as event listeners.
+- Client connections' Unsubscribe methods are replaced by functions returned by their Subscribe counterparts.
+
+### Fixed
+
+- Fix replay providers not replaying the oldest message if the ID provided is of the one before that one.
+- Fix replay providers hanging the caller's goroutine when a write error occurs using the default ServeHTTP implementation.
+- Fix providers hanging when a write error occurs using the default ServeHTTP implementation.
+
+## [0.2.0] - 2021-09-13
+
+### Added
+
+- Text/JSON marshalers and unmarshalers, and SQL scanners and valuers for the EventID type (previously event.ID).
+- Check for http.NoBody before resetting the request body on client reconnect.
+
+### Changed
+
+- Package structure. The module is now refactored into a single package with an idiomatic name. This has resulted in various name changes:
+ - `client.Error` - `sse.ConnectionError`
+ - `event.Event` - `sse.Message` (previous `server.Message` is removed, see next change)
+ - `event.ID` - `sse.EventID`
+ - `event.NewID` - `sse.NewEventID`
+ - `event.MustID` - `sse.MustEventID`
+ - `server.Connection` - `sse.UpgradedRequest`
+ - `server.NewConnection` - `sse.Upgrade`
+ - `server.ErrUnsupported` - `sse.ErrUpgradeUnsupported`
+ - `server.New` - `sse.NewServer`.
+- `event.Event` is merged with `server.Message`, becoming `sse.Message`. This affects the `sse.Server.Publish` function, which doesn't take a `topic` parameter anymore.
+- The server's constructor doesn't take an `Provider` as a parameter. It instead takes multiple optional `ServerOptions`. The `WithProvider` option is now used to pass custom providers to the server.
+- The `ReplayProvider` interface's `Put` method now takes a `**Message` instead of a `*Message`. This change also affects the replay providers in this package: `ValidReplayProvider` and `FiniteReplayProvider`.
+- The `Provider` interface's `Publish` method now takes a `*Message` instead of a `Message`. This change also affects `Joe`, the provider in this package.
+- The `UpgradedRequest`'s `Send` now method takes a `*Message` as parameter.
+
+## [0.1.0] - 2021-09-11 First release
+
+[@aldld]: https://github.com/aldld
+
+[#5]: https://github.com/tmaxmax/go-sse/pull/5
+[#2]: https://github.com/tmaxmax/go-sse/pull/2
+
+[0.6.0]: https://github.com/tmaxmax/go-sse/releases/tag/v0.6.0
+[0.5.2]: https://github.com/tmaxmax/go-sse/releases/tag/v0.5.2
+[0.5.1]: https://github.com/tmaxmax/go-sse/releases/tag/v0.5.1
+[0.5.0]: https://github.com/tmaxmax/go-sse/releases/tag/v0.5.0
+[0.4.3]: https://github.com/tmaxmax/go-sse/releases/tag/v0.4.3
+[0.4.2]: https://github.com/tmaxmax/go-sse/releases/tag/v0.4.2
+[0.4.1]: https://github.com/tmaxmax/go-sse/releases/tag/v0.4.1
+[0.4.0]: https://github.com/tmaxmax/go-sse/releases/tag/v0.4.0
+[0.3.0]: https://github.com/tmaxmax/go-sse/releases/tag/v0.3.0
+[0.2.0]: https://github.com/tmaxmax/go-sse/releases/tag/v0.2.0
+[0.1.0]: https://github.com/tmaxmax/go-sse/releases/tag/v0.1.0
diff --git a/vendor/github.com/tmaxmax/go-sse/LICENSE b/vendor/github.com/tmaxmax/go-sse/LICENSE
new file mode 100644
index 0000000000..4652fd0b4d
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Teodor Maxim
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/tmaxmax/go-sse/README.md b/vendor/github.com/tmaxmax/go-sse/README.md
new file mode 100644
index 0000000000..35f92e5df9
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/README.md
@@ -0,0 +1,485 @@
+# go-sse
+
+[](https://pkg.go.dev/github.com/tmaxmax/go-sse)
+
+[](https://codecov.io/gh/tmaxmax/go-sse)
+[](https://goreportcard.com/report/github.com/tmaxmax/go-sse)
+
+Lightweight, fully spec-compliant HTML5 server-sent events library.
+
+## Table of contents
+
+- [go-sse](#go-sse)
+ - [Table of contents](#table-of-contents)
+ - [Installation and usage](#installation-and-usage)
+ - [Cut to the chase – how do I read my LLM's response?](#cut-to-the-chase--how-do-i-read-my-llms-response)
+ - [Implementing a server](#implementing-a-server)
+ - [Providers and why they are vital](#providers-and-why-they-are-vital)
+ - [Meet Joe, the default provider](#meet-joe-the-default-provider)
+ - [Publish your first event](#publish-your-first-event)
+ - [The server-side "Hello world"](#the-server-side-hello-world)
+ - [Using the client](#using-the-client)
+ - [Creating a client](#creating-a-client)
+ - [Initiating a connection](#initiating-a-connection)
+ - [Subscribing to events](#subscribing-to-events)
+ - [Establishing the connection](#establishing-the-connection)
+ - [Connection lost?](#connection-lost)
+ - [The "Hello world" server's client](#the-hello-world-servers-client)
+ - [License](#license)
+ - [Contributing](#contributing)
+
+## Installation and usage
+
+Install the package using `go get`:
+
+```sh
+go get -u github.com/tmaxmax/go-sse
+```
+
+It is strongly recommended to use tagged versions of `go-sse` in your projects. The `master` branch has tested but unreleased and maybe undocumented changes, which may break backwards compatibility - use with caution.
+
+The library provides both server-side and client-side implementations of the protocol. The implementations are completely decoupled and unopinionated: you can connect to a server created using `go-sse` from the browser and you can connect to any server that emits events using the client!
+
+If you are not familiar with the protocol or not sure how it works, read [MDN's guide for using server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events). [The spec](https://html.spec.whatwg.org/multipage/server-sent-events.html) is also useful read!
+
+`go-sse` promises to support the [Go versions supported by the Go team](https://go.dev/doc/devel/release#policy) – that is, the 2 most recent major releases.
+
+## Cut to the chase – how do I read my LLM's response?
+
+If you're here just to read ChatGPT's, Claude's or whichever LLM's response stream, you're in the right place! Let's take a look at [`sse.Read`](https://pkg.go.dev/github.com/tmaxmax/go-sse#Read): you just make your HTTP request the same way you'd do for any other API and call it on the request body. Here's some code:
+
+```go
+req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "https://api.yourllm.com/v1/chat/completions", payload)
+req.Header.Set("Content-Type", "application/json")
+req.Header.Set("Authorization", "Bearer "+yourKey)
+
+res, err := http.DefaultClient.Do(req)
+if err != nil {
+ // handle error
+}
+defer res.Body.Close() // don't forget!!
+
+for ev, err := range sse.Read(res.Body, nil) {
+ if err != nil {
+ // handle read error
+ break // can end the loop as Read stops on first error anyway
+ }
+ // Do something with the events, parse the JSON or whatever.
+}
+```
+
+See the [LLM example](cmd/llm/main.go) for a fully working Go program.
+
+Go 1.23 iterators (officially ["range-over-func"](https://go.dev/blog/range-functions)) are used for this feature. If you are still on Go 1.22 use the `GOEXPERIMENT=rangefunc` environment variable (e.g. `GOEXPERIMENT=rangefunc go run main.go`) or use the iterator without the syntactic sugar:
+```go
+events(func(ev Event) bool {
+ // do something with event
+ return true // or false to stop iteration
+})
+```
+
+`sse.Read` is also useful if you're implementing an LLM SDK: call it in your code and spare yourself time and maintenance burden by not reimplementing event stream parsing.
+
+## Implementing a server
+
+### Providers and why they are vital
+
+First, a server instance has to be created:
+
+```go
+import "github.com/tmaxmax/go-sse"
+
+s := &sse.Server{} // zero value ready to use!
+```
+
+The `sse.Server` type also implements the `http.Handler` interface, but a server is framework-agnostic: See the [`ServeHTTP` implementation](https://github.com/tmaxmax/go-sse/blob/master/server/server.go#L156) to learn how to implement your own custom logic. It also has some additional configuration options:
+
+```go
+s := &sse.Server{
+ Provider: /* what goes here? find out next! */,
+ OnSession: /* see Go docs for this one */,
+ Logger: /* see Go docs for this one, too */,
+}
+```
+
+What is this "provider"? A provider is an implementation of the publish-subscribe messaging system:
+
+```go
+type Provider interface {
+ // Publish a message to all subscribers of the given topics.
+ Publish(msg *Message, topics []string) error
+ // Add a new subscriber that is unsubscribed when the context is done.
+ Subscribe(ctx context.Context, sub Subscription) error
+ // Cleanup all resources and stop publishing messages or accepting subscriptions.
+ Shutdown(ctx context.Context) error
+}
+```
+
+The provider is what dispatches events to clients. When you publish a message (an event), the provider distributes it to all connections (subscribers). It is the central piece of the server: it determines the maximum number of clients your server can handle, the latency between broadcasting events and receiving them client-side and the maximum message throughput supported by your server. As different use cases have different needs, `go-sse` allows to plug in your own system. Some examples of such external systems are:
+
+- [RabbitMQ streams](https://blog.rabbitmq.com/posts/2021/07/rabbitmq-streams-overview/)
+- [Redis pub-sub](https://redis.io/topics/pubsub)
+- [Apache Kafka](https://kafka.apache.org/)
+- Your own! For example, you can mock providers in testing.
+
+If an external system is required, an adapter that satisfies the `Provider` interface must be created so it can then be used with `go-sse`. To implement such an adapter, read [the Provider documentation][2] for implementation requirements! And maybe share them with others: `go-sse` is built with reusability in mind!
+
+But in most cases the power and scalability that these external systems bring is not necessary, so `go-sse` comes with a default provider builtin. Read further!
+
+### Meet Joe, the default provider
+
+The server still works by default, without a provider. `go-sse` brings you Joe: the trusty, pure Go pub-sub implementation, who handles all your events by default! Befriend Joe as following:
+
+```go
+import "github.com/tmaxmax/go-sse"
+
+joe := &sse.Joe{} // the zero value is ready to use!
+```
+
+and he'll dispatch events all day! By default, he has no memory of what events he has received, but you can help him remember and replay older messages to new clients using a `Replayer`:
+
+```go
+type Replayer interface {
+ // Put a new event in the provider's buffer.
+ // If the provider automatically adds IDs as well,
+ // the returned message will also have the ID set,
+ // otherwise the input value is returned.
+ Put(msg *Message, topics []string) (*Message, error)
+ // Replay valid events to a subscriber.
+ Replay(sub Subscription) error
+}
+```
+
+`go-sse` provides two replayers by default, which both hold the events in-memory: the `ValidReplayer` and `FiniteReplayer`. The first replays events that are valid, not expired, the second replays a finite number of the most recent events. For example:
+
+```go
+// Let's have events expire after 5 minutes. For this example we don't enable automatic ID generation.
+r, err := sse.NewValidReplayer(time.Minute * 5, false)
+if err != nil {
+ // TTL was 0 or negative.
+ // Useful to have this error if the value comes from a config which happens to be faulty.
+}
+
+joe = &sse.Joe{Replayer: r}
+```
+
+will tell Joe to replay all valid events! Replayers can do so much more (for example, add IDs to events automatically): read the [docs][3] on how to use the existing ones and how to implement yours.
+
+You can also implement your own replayers: maybe you need persistent storage for your events? Or event validity is determined based on other criteria than expiry time? And if you think your replayer may be useful to others, you are encouraged to share it!
+
+`go-sse` created the `Replayer` interface mainly for `Joe`, but it encourages you to integrate it with your own `Provider` implementations, where suitable.
+
+### Publish your first event
+
+To publish events from the server, we use the `sse.Message` struct:
+
+```go
+import "github.com/tmaxmax/go-sse"
+
+m := &sse.Message{}
+m.AppendData("Hello world!", "Nice\nto see you.")
+```
+
+Now let's send it to our clients:
+
+```go
+var s *sse.Server
+
+s.Publish(m)
+```
+
+This is how clients will receive our event:
+
+```txt
+data: Hello world!
+data: Nice
+data: to see you.
+```
+
+You can also see that `go-sse` takes care of splitting input by lines into new fields, as required by the specification.
+
+Keep in mind that replayers, such as the `ValidReplayer` used above, will give an error for and won't replay the events without an ID (unless, of course, they give the IDs themselves). To have our event expire, as configured, we must set an ID for the event:
+
+```go
+m.ID = sse.ID("unique")
+```
+
+This is how the event will look:
+
+```txt
+id: unique
+data: Hello world!
+data: Nice
+data: to see you.
+```
+
+Now that it has an ID, the event will be considered expired 5 minutes after it's been published – it won't be replayed to clients after it expires!
+
+`sse.ID` is a function that returns an `EventID` – a special type that denotes an event's ID. An ID must not have newlines, so we must use special functions which validate the value beforehand. The `ID` constructor function we've used above panics (it is useful when creating IDs from static strings), but there's also `NewID`, which returns an error indicating whether the value was successfully converted to an ID or not:
+
+```go
+id, err := sse.NewID("invalid\nID")
+```
+
+Here, `err` will be non-nil and `id` will be an unset value: no `id` field will be sent to clients if you set an event's ID using that value!
+
+Setting the event's type (the `event` field) is equally easy:
+
+```go
+m.Type = sse.Type("The event's name")
+```
+
+Like IDs, types cannot have newlines. You are provided with constructors that follow the same convention: `Type` panics, `NewType` returns an error. Read the [docs][4] to find out more about messages and how to use them!
+
+### The server-side "Hello world"
+
+Now, let's put everything that we've learned together! We'll create a server that sends a "Hello world!" message every second to all its clients, with Joe's help:
+
+```go
+package main
+
+import (
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/tmaxmax/go-sse"
+)
+
+func main() {
+ s := &sse.Server{}
+
+ go func() {
+ m := &sse.Message{}
+ m.AppendData("Hello world")
+
+ for range time.Tick(time.Second) {
+ _ = s.Publish(m)
+ }
+ }()
+
+ if err := http.ListenAndServe(":8000", s); err != nil {
+ log.Fatalln(err)
+ }
+}
+```
+
+Joe is our default provider here, as no provider is given to the server constructor. The server is already an `http.Handler` so we can use it directly with `http.ListenAndServe`.
+
+[Also see a more complex example!](cmd/complex/main.go)
+
+This is by far a complete presentation, make sure to read the docs in order to use `go-sse` to its full potential!
+
+## Using the client
+
+### Creating a client
+
+We will use the `sse.Client` type for connecting to event streams:
+
+```go
+type Client struct {
+ HTTPClient *http.Client
+ OnRetry backoff.Notify
+ ResponseValidator ResponseValidator
+ MaxRetries int
+ DefaultReconnectionTime time.Duration
+}
+```
+
+As you can see, it uses a `net/http` client. It also uses the [cenkalti/backoff][1] library for implementing auto-reconnect when a connection to a server is lost. Read the [client docs][5] and the Backoff library's docs to find out how to configure the client. We'll use the default client the package provides for further examples.
+
+### Initiating a connection
+
+We must first create an `http.Request` - yup, a fully customizable request:
+
+```go
+req, err := http.NewRequestWithContext(ctx, http.MethodGet, "host", http.NoBody)
+```
+
+Any kind of request is valid as long as your server handler supports it: you can do a GET, a POST, send a body; do whatever! The context is used as always for cancellation - to stop receiving events you will have to cancel the context.
+Let's initiate a connection with this request:
+
+```go
+import "github.com/tmaxmax/go-sse"
+
+conn := sse.DefaultClient.NewConnection(req)
+// you can also do sse.NewConnection(req)
+// it is an utility function that calls the
+// NewConnection method on the default client
+```
+
+### Subscribing to events
+
+Great! Let's imagine the event stream looks as following:
+
+```txt
+data: some unnamed event
+
+event: I have a name
+data: some data
+
+event: Another name
+data: some data
+```
+
+To receive the unnamed events, we subscribe to them as following:
+
+```go
+unsubscribe := conn.SubscribeMessages(func (event sse.Event) {
+ // do something with the event
+})
+```
+
+To receive the events named "I have a name":
+
+```go
+unsubscribe := conn.SubscribeEvent("I have a name", func (event sse.Event) {
+ // do something with the event
+})
+```
+
+If you want to subscribe to all events, regardless of their name:
+
+```go
+unsubscribe := conn.SubscribeToAll(func (event sse.Event) {
+ // do something with the event
+})
+```
+
+All `Subscribe` methods return a function that when called tells the connection to stop calling the corresponding callback.
+
+In order to work with events, the `sse.Event` type has some fields and methods exposed:
+
+```go
+type Event struct {
+ LastEventID string
+ Name string
+ Data string
+}
+```
+
+Pretty self-explanatory, but make sure to read the [docs][6]!
+
+Now, with this knowledge, let's subscribe to all unnamed events and, when the connection is established, print their data:
+
+```go
+unsubscribe := conn.SubscribeMessages(func(event sse.Event) {
+ fmt.Printf("Received an unnamed event: %s\n", event.Data)
+})
+```
+
+### Establishing the connection
+
+Great, we are subscribed now! Let's start receiving events:
+
+```go
+err := conn.Connect()
+```
+
+By calling `Connect`, the request created above will be sent to the server, and if successful, the subscribed callbacks will be called when new events are received. `Connect` returns only after all callbacks have finished executing.
+To stop calling a certain callback, call the unsubscribe function returned when subscribing. You can also subscribe new callbacks after calling Connect from a different goroutine.
+When using a `context.Context` to stop the connection, the error returned will be the context error – be it `context.Canceled`, `context.DeadlineExceeded` or a custom cause (when using `context.WithCancelCause`). In other words, a successfully closed `Connection` will always return an error – if the context error is not relevant, you can ignore it. For example:
+
+```go
+if err := conn.Connect(); !errors.Is(err, context.Canceled) {
+ // handle error
+}
+```
+
+A context created with `context.WithCancel`, or one with `context.WithCancelCause` and cancelled with the error `context.Canceled` is assumed above.
+
+There may be situations where the connection does not have to live for indeterminately long – for example when using the OpenAI API. In those situations, configure the client to not retry the connection and ignore `io.EOF` on return:
+
+```go
+client := sse.Client{
+ Backoff: sse.Backoff{
+ MaxRetries: -1,
+ },
+ // other settings...
+}
+
+req, _ := http.NewRequest(http.MethodPost, "https://api.openai.com/...", body)
+conn := client.NewConnection(req)
+
+conn.SubscribeMessages(/* callback */)
+
+if err := conn.Connect(); !errors.Is(err, io.EOF) {
+ // handle error
+}
+```
+
+### Connection lost?
+
+Either way, after receiving so many events, something went wrong and the server is temporarily down. Oh no! As a last hope, it has sent us the following event:
+
+```text
+retry: 60000
+: that's a minute in milliseconds and this
+: is a comment which is ignored by the client
+```
+
+Not a sweat, though! The connection will automatically be reattempted after a minute, when we'll hope the server's back up again. Canceling the request's context will cancel any reconnection attempt, too.
+
+If the server doesn't set a retry time, the client's `DefaultReconnectionTime` is used.
+
+### The "Hello world" server's client
+
+Let's use what we know to create a client for the previous server example:
+
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/tmaxmax/go-sse"
+)
+
+func main() {
+ r, _ := http.NewRequest(http.MethodGet, "http://localhost:8000", nil)
+ conn := sse.NewConnection(r)
+
+ conn.SubscribeMessages(func(ev sse.Event) {
+ fmt.Printf("%s\n\n", ev.Data)
+ })
+
+ if err := conn.Connect(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+}
+```
+
+Yup, this is it! We are using the default client to receive all the unnamed events from the server. The output will look like this, when both programs are run in parallel:
+
+```txt
+Hello world!
+
+Hello world!
+
+Hello world!
+
+Hello world!
+
+...
+```
+
+[See the complex example's client too!](cmd/complex_client/main.go)
+
+## License
+
+This project is licensed under the [MIT license](LICENSE).
+
+## Contributing
+
+The library's in its early stages, so contributions are vital - I'm so glad you wish to improve `go-sse`! Maybe start by opening an issue first, to describe the intended modifications and further discuss how to integrate them. Open PRs to the `master` branch and wait for CI to complete. If all is clear, your changes will soon be merged! Also, make sure your changes come with an extensive set of tests and the code is formatted.
+
+Thank you for contributing!
+
+[1]: https://github.com/cenkalti/backoff
+[2]: https://pkg.go.dev/github.com/tmaxmax/go-sse#Provider
+[3]: https://pkg.go.dev/github.com/tmaxmax/go-sse#Replayer
+[4]: https://pkg.go.dev/github.com/tmaxmax/go-sse#Message
+[5]: https://pkg.go.dev/github.com/tmaxmax/go-sse#Client
+[6]: https://pkg.go.dev/github.com/tmaxmax/go-sse#Event
diff --git a/vendor/github.com/tmaxmax/go-sse/client.go b/vendor/github.com/tmaxmax/go-sse/client.go
new file mode 100644
index 0000000000..a0a576be22
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/client.go
@@ -0,0 +1,229 @@
+package sse
+
+import (
+ "fmt"
+ "math/rand"
+ "net/http"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// The ResponseValidator type defines the type of the function
+// that checks whether server responses are valid, before starting
+// to read events from them. See the Client's documentation for more info.
+//
+// These errors are considered permanent and thus if the client is configured
+// to retry on error no retry is attempted and the error is returned.
+type ResponseValidator func(*http.Response) error
+
+// The Client struct is used to initialize new connections to different servers.
+// It is safe for concurrent use.
+//
+// After connections are created, the Connect method must be called to start
+// receiving events.
+type Client struct {
+ // The HTTP client to be used. Defaults to http.DefaultClient.
+ HTTPClient *http.Client
+ // A callback that's executed whenever a reconnection attempt starts.
+ // It receives the error that caused the retry and the reconnection time.
+ OnRetry func(error, time.Duration)
+ // A function to check if the response from the server is valid.
+ // Defaults to a function that checks the response's status code is 200
+ // and the content type is text/event-stream.
+ //
+ // If the error type returned has a Temporary or a Timeout method,
+ // they will be used to determine whether to reattempt the connection.
+ // Otherwise, the error will be considered permanent and no reconnections
+ // will be attempted.
+ ResponseValidator ResponseValidator
+ // Backoff configures the backoff strategy. See the documentation of
+ // each field for more information.
+ Backoff Backoff
+}
+
+// Backoff configures the reconnection strategy of a Connection.
+type Backoff struct {
+ // The initial wait time before a reconnection is attempted.
+ // Must be >0. Defaults to 500ms.
+ InitialInterval time.Duration
+ // How much should the reconnection time grow on subsequent attempts.
+ // Must be >=1; 1 = constant interval. Defaults to 1.5.
+ Multiplier float64
+ // How much does the reconnection time vary relative to the base value.
+ // This is useful to prevent multiple clients to reconnect at the exact
+ // same time, as it makes the wait times distinct.
+ // Must be in range (0, 1); -1 = no randomization. Defaults to 0.5.
+ Jitter float64
+ // How much can the wait time grow.
+ // If <=0 = the wait time can infinitely grow. Defaults to infinite growth.
+ MaxInterval time.Duration
+ // How much time can retries be attempted.
+ // For example, if this is 5 seconds, after 5 seconds the client
+ // will stop retrying.
+ // If <=0 = no limit. Defaults to no limit.
+ MaxElapsedTime time.Duration
+ // How many retries are allowed.
+ // <0 = no retries, 0 = infinite. Defaults to infinite retries.
+ MaxRetries int
+}
+
+// NewConnection initializes and configures a connection. On connect, the given
+// request is sent and if successful the connection starts receiving messages.
+// Use the request's context to stop the connection.
+//
+// If the request has a body, it is necessary to provide a GetBody function in order
+// for the connection to be reattempted, in case of an error. Using readers
+// such as bytes.Reader, strings.Reader or bytes.Buffer when creating a request
+// using http.NewRequestWithContext will ensure this function is present on the request.
+func (c *Client) NewConnection(r *http.Request) *Connection {
+ if r == nil {
+ panic("go-sse.client.NewConnection: request cannot be nil")
+ }
+
+ mergeDefaults(c)
+
+ conn := &Connection{
+ client: *c, // we clone the client so the config cannot be modified from outside
+ request: r.Clone(r.Context()), // we clone the request so its fields cannot be modified from outside
+ callbacks: map[string]map[int]EventCallback{},
+ callbacksAll: map[int]EventCallback{},
+ }
+
+ return conn
+}
+
+// DefaultValidator is the default client response validation function. As per the spec,
+// It checks the content type to be text/event-stream and the response status code to be 200 OK.
+//
+// If this validator fails, errors are considered permanent. No retry attempts are made.
+//
+// See https://html.spec.whatwg.org/multipage/server-sent-events.html#sse-processing-model.
+var DefaultValidator ResponseValidator = func(r *http.Response) error {
+ if r.StatusCode != http.StatusOK {
+ return fmt.Errorf("expected status code %d %s, received %d %s", http.StatusOK, http.StatusText(http.StatusOK), r.StatusCode, http.StatusText(r.StatusCode))
+ }
+ cts := r.Header.Get("Content-Type")
+ ct := contentType(cts)
+ if expected := "text/event-stream"; ct != expected {
+ return fmt.Errorf("expected content type to have %q, received %q", expected, cts)
+ }
+ return nil
+}
+
+// NoopValidator is a client response validator function that treats all responses as valid.
+var NoopValidator ResponseValidator = func(_ *http.Response) error {
+ return nil
+}
+
+// DefaultClient is the client that is used when creating a new connection using the NewConnection function.
+// Unset properties on new clients are replaced with the ones set for the default client.
+var DefaultClient = &Client{
+ HTTPClient: http.DefaultClient,
+ ResponseValidator: DefaultValidator,
+ Backoff: Backoff{
+ InitialInterval: time.Millisecond * 500,
+ Multiplier: 1.5,
+ Jitter: 0.5,
+ },
+}
+
+// NewConnection creates a connection using the default client.
+func NewConnection(r *http.Request) *Connection {
+ return DefaultClient.NewConnection(r)
+}
+
+func mergeDefaults(c *Client) {
+ if c.HTTPClient == nil {
+ c.HTTPClient = DefaultClient.HTTPClient
+ }
+ if c.Backoff.InitialInterval <= 0 {
+ c.Backoff.InitialInterval = DefaultClient.Backoff.InitialInterval
+ }
+ if c.Backoff.Multiplier < 1 {
+ c.Backoff.Multiplier = DefaultClient.Backoff.Multiplier
+ }
+ if c.Backoff.Jitter <= 0 || c.Backoff.Jitter >= 1 {
+ c.Backoff.Jitter = DefaultClient.Backoff.Jitter
+ }
+ if c.ResponseValidator == nil {
+ c.ResponseValidator = DefaultClient.ResponseValidator
+ }
+}
+
+func contentType(header string) string {
+ cts := strings.FieldsFunc(header, func(r rune) bool {
+ return unicode.IsSpace(r) || r == ';' || r == ','
+ })
+ if len(cts) == 0 {
+ return ""
+ }
+ return strings.ToLower(cts[0])
+}
+
+type backoffController struct {
+ start time.Time
+ rng *rand.Rand
+ b *Backoff
+ interval time.Duration
+ numRetries int
+}
+
+func (b *Backoff) new() backoffController {
+ now := time.Now()
+ return backoffController{
+ start: now,
+ rng: rand.New(rand.NewSource(now.UnixNano())),
+ b: b,
+ interval: b.InitialInterval,
+ numRetries: 0,
+ }
+}
+
+// reset the backoff to the initial state, i.e. as if no retries have occurred.
+// If newInterval is greater than 0, the initial interval is changed to it.
+func (c *backoffController) reset(newInterval time.Duration) {
+ if newInterval > 0 {
+ c.interval = newInterval
+ } else {
+ c.interval = c.b.InitialInterval
+ }
+ c.numRetries = 0
+ c.start = time.Now()
+}
+
+func (c *backoffController) next() (interval time.Duration, shouldRetry bool) {
+ if c.b.MaxRetries < 0 || (c.b.MaxRetries > 0 && c.numRetries == c.b.MaxRetries) {
+ return 0, false
+ }
+
+ c.numRetries++
+ elapsed := time.Since(c.start)
+ next := nextInterval(c.b.Jitter, c.rng, c.interval)
+ c.interval = growInterval(c.interval, c.b.MaxInterval, c.b.Multiplier)
+
+ if c.b.MaxElapsedTime > 0 && elapsed+next > c.b.MaxElapsedTime {
+ return 0, false
+ }
+
+ return next, true
+}
+
+func nextInterval(jitter float64, rng *rand.Rand, current time.Duration) time.Duration {
+ if jitter == -1 {
+ return current
+ }
+
+ delta := jitter * float64(current)
+ minInterval := float64(current) - delta
+ maxInterval := float64(current) + delta
+
+ return time.Duration(minInterval + (rng.Float64() * (maxInterval - minInterval + 1)))
+}
+
+func growInterval(current, maxInterval time.Duration, mul float64) time.Duration {
+ if maxInterval > 0 && float64(current) >= float64(maxInterval)/mul {
+ return maxInterval
+ }
+ return time.Duration(float64(current) * mul)
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/client_connection.go b/vendor/github.com/tmaxmax/go-sse/client_connection.go
new file mode 100644
index 0000000000..a901952509
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/client_connection.go
@@ -0,0 +1,277 @@
+package sse
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/tmaxmax/go-sse/internal/parser"
+)
+
+// EventCallback is a function that is used to receive events from a Connection.
+type EventCallback func(Event)
+
+// EventCallbackRemover is a function that removes an already registered callback
+// from a connection. Calling it multiple times is a no-op.
+type EventCallbackRemover func()
+
+// Connection is a connection to an events stream. Created using the Client struct,
+// a Connection processes the incoming events and calls the subscribed event callbacks.
+// If the connection to the server temporarily fails, the connection will be reattempted.
+// Retry values received from servers will be taken into account.
+//
+// Connections must not be copied after they are created.
+type Connection struct { //nolint:govet // The current order aids readability.
+ mu sync.RWMutex
+ request *http.Request
+ callbacks map[string]map[int]EventCallback
+ callbacksAll map[int]EventCallback
+ lastEventID string
+ client Client
+ buf []byte
+ bufMaxSize int
+ callbackID int
+ isRetry bool
+}
+
+// SubscribeMessages subscribes the given callback to all events without type (without or with empty `event` field).
+// Remove the callback by calling the returned function.
+func (c *Connection) SubscribeMessages(cb EventCallback) EventCallbackRemover {
+ return c.SubscribeEvent("", cb)
+}
+
+// SubscribeEvent subscribes the given callback to all the events with the provided type
+// (the `event` field has the value given here).
+// Remove the callback by calling the returned function.
+func (c *Connection) SubscribeEvent(typ string, cb EventCallback) EventCallbackRemover {
+ return c.addSubscriber(typ, cb)
+}
+
+// SubscribeToAll subscribes the given callback to all events, with or without type.
+// Remove the callback by calling the returned function.
+func (c *Connection) SubscribeToAll(cb EventCallback) EventCallbackRemover {
+ return c.addSubscriberToAll(cb)
+}
+
+func (c *Connection) addSubscriberToAll(cb EventCallback) EventCallbackRemover {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ id := c.callbackID
+ c.callbacksAll[id] = cb
+ c.callbackID++
+
+ return func() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ delete(c.callbacksAll, id)
+ }
+}
+
+func (c *Connection) addSubscriber(event string, cb EventCallback) EventCallbackRemover {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if _, ok := c.callbacks[event]; !ok {
+ c.callbacks[event] = map[int]EventCallback{}
+ }
+
+ id := c.callbackID
+ c.callbacks[event][id] = cb
+ c.callbackID++
+
+ return func() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ delete(c.callbacks[event], id)
+ if len(c.callbacks[event]) == 0 {
+ delete(c.callbacks, event)
+ }
+ }
+}
+
+// Buffer sets the underlying buffer to be used when scanning events.
+// Use this if you need to read very large events (bigger than the default
+// of 65K bytes).
+//
+// Read the documentation of bufio.Scanner.Buffer for more information.
+func (c *Connection) Buffer(buf []byte, maxSize int) {
+ c.buf = buf
+ c.bufMaxSize = maxSize
+}
+
+// ConnectionError is the type that wraps all the connection errors that occur.
+type ConnectionError struct {
+ // The request for which the connection failed.
+ Req *http.Request
+ // The reason the operation failed.
+ Err error
+ // The reason why the request failed.
+ Reason string
+}
+
+func (e *ConnectionError) Error() string {
+ return fmt.Sprintf("request failed: %s: %v", e.Reason, e.Err)
+}
+
+func (e *ConnectionError) Unwrap() error {
+ return e.Err
+}
+
+func (c *Connection) resetRequest() error {
+ if !c.isRetry {
+ c.isRetry = true
+ return nil
+ }
+ if err := resetRequestBody(c.request); err != nil {
+ return err
+ }
+ if c.lastEventID == "" {
+ c.request.Header.Del("Last-Event-ID")
+ } else {
+ c.request.Header.Set("Last-Event-ID", c.lastEventID)
+ }
+ return nil
+}
+
+func (c *Connection) dispatch(ev Event) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ cbs := c.callbacks[ev.Type]
+ cbCount := len(cbs) + len(c.callbacksAll)
+ if cbCount == 0 {
+ return
+ }
+
+ for _, cb := range c.callbacks[ev.Type] {
+ cb(ev)
+ }
+ for _, cb := range c.callbacksAll {
+ cb(ev)
+ }
+}
+
+func (c *Connection) read(r io.Reader, setRetry func(time.Duration)) error {
+ pf := func() *parser.Parser {
+ p := parser.New(r)
+ if c.buf != nil || c.bufMaxSize > 0 {
+ p.Buffer(c.buf, c.bufMaxSize)
+ }
+ return p
+ }
+
+ var readErr error
+ read(pf, c.lastEventID, func(r int64) { setRetry(time.Duration(r) * time.Millisecond) }, false)(func(e Event, err error) bool {
+ if err != nil {
+ readErr = err
+ return false
+ }
+ c.lastEventID = e.LastEventID
+ c.dispatch(e)
+ return true
+ })
+
+ return readErr
+}
+
+// Connect sends the request the connection was created with to the server
+// and, if successful, it starts receiving events. The caller goroutine
+// is blocked until the request's context is done or an error occurs.
+//
+// If the request's context is cancelled, Connect returns its error.
+// Otherwise, if the maximum number of retries is made, the last error
+// that occurred is returned. Connect never returns otherwise – either
+// the context is cancelled, or it's done retrying.
+//
+// All errors returned other than the context errors will be wrapped
+// inside a *ConnectionError.
+func (c *Connection) Connect() error {
+ ctx := c.request.Context()
+ backoff := c.client.Backoff.new()
+
+ c.request.Header.Set("Accept", "text/event-stream")
+ c.request.Header.Set("Connection", "keep-alive")
+ c.request.Header.Set("Cache", "no-cache")
+
+ t := time.NewTimer(0)
+ defer t.Stop()
+
+ for {
+ select {
+ case <-t.C:
+ shouldRetry, err := c.doConnect(ctx, backoff.reset)
+ if !shouldRetry {
+ return err
+ }
+
+ next, shouldRetry := backoff.next()
+ if !shouldRetry {
+ return err
+ }
+
+ if c.client.OnRetry != nil {
+ c.client.OnRetry(err, next)
+ }
+
+ t.Reset(next)
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
+
+func (c *Connection) doConnect(ctx context.Context, setRetry func(time.Duration)) (shouldRetry bool, err error) {
+ if err := c.resetRequest(); err != nil {
+ return false, &ConnectionError{Req: c.request, Reason: "request reset failed", Err: err}
+ }
+
+ res, err := c.client.HTTPClient.Do(c.request)
+ if err != nil {
+ concrete := err.(*url.Error) //nolint:errorlint // We know the concrete type here
+ if errors.Is(err, ctx.Err()) {
+ return false, concrete.Err
+ }
+ return true, &ConnectionError{Req: c.request, Reason: "connection to server failed", Err: concrete.Err}
+ }
+ defer res.Body.Close()
+
+ if err := c.client.ResponseValidator(res); err != nil {
+ return false, &ConnectionError{Req: c.request, Reason: "response validation failed", Err: err}
+ }
+
+ setRetry(0)
+
+ err = c.read(res.Body, setRetry)
+ if errors.Is(err, ctx.Err()) {
+ return false, err
+ }
+
+ return true, &ConnectionError{Req: c.request, Reason: "connection to server lost", Err: err}
+}
+
+// ErrNoGetBody is a sentinel error returned when the connection cannot be reattempted
+// due to GetBody not existing on the original request.
+var ErrNoGetBody = errors.New("the GetBody function doesn't exist on the request")
+
+func resetRequestBody(r *http.Request) error {
+ if r.Body == nil || r.Body == http.NoBody {
+ return nil
+ }
+ if r.GetBody == nil {
+ return ErrNoGetBody
+ }
+ body, err := r.GetBody()
+ if err != nil {
+ return err
+ }
+ r.Body = body
+ return nil
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/codecov.yml b/vendor/github.com/tmaxmax/go-sse/codecov.yml
new file mode 100644
index 0000000000..fd35a5fac2
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+ - cmd
+ - internal/tests
\ No newline at end of file
diff --git a/vendor/github.com/tmaxmax/go-sse/event.go b/vendor/github.com/tmaxmax/go-sse/event.go
new file mode 100644
index 0000000000..989bb9dd8b
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/event.go
@@ -0,0 +1,133 @@
+package sse
+
+import (
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/tmaxmax/go-sse/internal/parser"
+)
+
+// The Event struct represents an event sent to the client by a server.
+type Event struct {
+ // The last non-empty ID of all the events received. This may not be
+ // the ID of the latest event!
+ LastEventID string
+ // The event's type. It is empty if the event is unnamed.
+ Type string
+ // The event's payload.
+ Data string
+}
+
+// ReadConfig is used to configure how Read behaves.
+type ReadConfig struct {
+ // MaxEventSize is the maximum expected length of the byte sequence
+ // representing a single event. Parsing events longer than that
+ // will result in an error.
+ //
+ // By default this limit is 64KB. You don't need to set this if it
+ // is enough for your needs (e.g. the events you receive don't contain
+ // larger amounts of data).
+ MaxEventSize int
+}
+
+// Read parses an SSE stream and yields all incoming events.
+// On any encountered errors iteration stops and no further events are parsed –
+// the loop can safely be ended on error. If EOF is reached, the Read operation
+// is considered successful and no error is returned. An Event will never
+// be yielded together with an error.
+//
+// Read is especially useful for parsing responses from services which
+// communicate using SSE but not over long-lived connections – for example,
+// LLM APIs.
+//
+// Read handles the Event.LastEventID value just as the browser SSE client
+// (EventSource) would – for every event, the last encountered event ID will be given,
+// even if the ID is not the current event's ID. Read, unlike EventSource, does
+// not set Event.Type to "message" if no "event" field is received, leaving
+// it blank.
+//
+// Read provides no way to handle the "retry" field and doesn't handle retrying.
+// Use a Client and a Connection if you need to retry requests.
+func Read(r io.Reader, cfg *ReadConfig) func(func(Event, error) bool) {
+ pf := func() *parser.Parser {
+ p := parser.New(r)
+ if cfg != nil && cfg.MaxEventSize > 0 {
+ // NOTE(tmaxmax): we don't allow setting the buffer at the moment.
+ // ReadConfig objects might be shared between Read calls executed in
+ // different goroutines and having an actual []byte in it seems dangerous.
+ // If there is demand it can be added.
+ p.Buffer(nil, cfg.MaxEventSize)
+ }
+ return p
+ }
+
+ // We take a factory function for the parser so that Read can be inlined by the compiler.
+ return read(pf, "", nil, true)
+}
+
+func read(pf func() *parser.Parser, lastEventID string, onRetry func(int64), ignoreEOF bool) func(func(Event, error) bool) {
+ return func(yield func(Event, error) bool) {
+ p := pf()
+
+ typ, sb, dirty := "", strings.Builder{}, false
+ doYield := func(data string) bool {
+ if data != "" {
+ data = data[:len(data)-1]
+ }
+ return yield(Event{LastEventID: lastEventID, Data: data, Type: typ}, nil)
+ }
+
+ for f := (parser.Field{}); p.Next(&f); {
+ switch f.Name { //nolint:exhaustive // Comment fields are not parsed.
+ case parser.FieldNameData:
+ sb.WriteString(f.Value)
+ sb.WriteByte('\n')
+ dirty = true
+ case parser.FieldNameEvent:
+ typ = f.Value
+ dirty = true
+ case parser.FieldNameID:
+ // empty IDs are valid, only IDs that contain the null byte must be ignored:
+ // https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation
+ if strings.IndexByte(f.Value, 0) != -1 {
+ break
+ }
+
+ lastEventID = f.Value
+ dirty = true
+ case parser.FieldNameRetry:
+ n, err := strconv.ParseInt(f.Value, 10, 64)
+ if err != nil {
+ break
+ }
+ if n >= 0 && onRetry != nil {
+ onRetry(n)
+ dirty = true
+ }
+ default:
+ if dirty {
+ if !doYield(sb.String()) {
+ return
+ }
+ sb.Reset()
+ typ = ""
+ dirty = false
+ }
+ }
+ }
+
+ err := p.Err()
+ isEOF := err == io.EOF //nolint:errorlint // Our scanner returns io.EOF unwrapped
+
+ if dirty && isEOF {
+ if !doYield(sb.String()) {
+ return
+ }
+ }
+
+ if err != nil && !(ignoreEOF && isEOF) {
+ yield(Event{}, err)
+ }
+ }
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/internal/parser/chunk.go b/vendor/github.com/tmaxmax/go-sse/internal/parser/chunk.go
new file mode 100644
index 0000000000..449b715bee
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/internal/parser/chunk.go
@@ -0,0 +1,41 @@
+package parser
+
+// isNewlineChar returns whether the given character is '\n' or '\r'.
+func isNewlineChar(b byte) bool {
+ return b == '\n' || b == '\r'
+}
+
+// NewlineIndex returns the index of the first occurrence of a newline sequence (\n, \r, or \r\n).
+// It also returns the sequence's length. If no sequence is found, index is equal to len(s)
+// and length is 0.
+//
+// The newline is defined in the Event Stream standard's documentation:
+// https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events
+func NewlineIndex(s string) (index, length int) {
+ for l := len(s); index < l; index++ {
+ b := s[index]
+
+ if isNewlineChar(b) {
+ length++
+ if b == '\r' && index < l-1 && s[index+1] == '\n' {
+ length++
+ }
+
+ break
+ }
+ }
+
+ return
+}
+
+// NextChunk retrieves the next chunk of data from the given string
+// along with the data remaining after the returned chunk.
+// A chunk is a string of data delimited by a newline.
+// If the returned chunk is the last one, len(remaining) will be 0.
+//
+// The newline is defined in the Event Stream standard's documentation:
+// https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events
+func NextChunk(s string) (chunk, remaining string, hasNewline bool) {
+ index, endlineLen := NewlineIndex(s)
+ return s[:index], s[index+endlineLen:], endlineLen != 0
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/internal/parser/field.go b/vendor/github.com/tmaxmax/go-sse/internal/parser/field.go
new file mode 100644
index 0000000000..0262b16a2d
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/internal/parser/field.go
@@ -0,0 +1,44 @@
+package parser
+
+// FieldName is the name of the field.
+type FieldName string
+
+// A Field represents an unprocessed field of a single event. The Name is the field's identifier, which is used to
+// process the fields afterwards.
+//
+// As a special case, if a parser (FieldParser or Parser) returns a field without a name,
+// it means that a whole event was parsed. In other words, all the fields before the one without a name
+// and after another such field are part of the same event.
+type Field struct {
+ Name FieldName
+ Value string
+}
+
+// Valid field names.
+const (
+ FieldNameData = FieldName("data")
+ FieldNameEvent = FieldName("event")
+ FieldNameRetry = FieldName("retry")
+ FieldNameID = FieldName("id")
+ // FieldNameComment is a sentinel value that indicates
+ // comment fields. It is not a valid field name that should
+ // be written to an SSE stream.
+ FieldNameComment = FieldName(":")
+
+ maxFieldNameLength = 5
+)
+
+func getFieldName(b string) (FieldName, bool) {
+ switch FieldName(b) { //nolint:exhaustive // Cannot have Comment here
+ case FieldNameData:
+ return FieldNameData, true
+ case FieldNameEvent:
+ return FieldNameEvent, true
+ case FieldNameRetry:
+ return FieldNameRetry, true
+ case FieldNameID:
+ return FieldNameID, true
+ default:
+ return "", false
+ }
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/internal/parser/field_parser.go b/vendor/github.com/tmaxmax/go-sse/internal/parser/field_parser.go
new file mode 100644
index 0000000000..431cf2f4e0
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/internal/parser/field_parser.go
@@ -0,0 +1,129 @@
+package parser
+
+import (
+ "errors"
+ "strings"
+)
+
+// FieldParser extracts fields from a byte slice.
+type FieldParser struct {
+ err error
+ data string
+
+ started bool
+
+ keepComments bool
+ removeBOM bool
+}
+
+func trimFirstSpace(c string) string {
+ if c != "" && c[0] == ' ' {
+ return c[1:]
+ }
+ return c
+}
+
+func (f *FieldParser) scanSegment(chunk string, out *Field) bool {
+ colonPos, l := strings.IndexByte(chunk, ':'), len(chunk)
+ if colonPos > maxFieldNameLength {
+ return false
+ }
+ if colonPos == -1 {
+ colonPos = l
+ }
+
+ name, ok := getFieldName(chunk[:colonPos])
+ if ok {
+ out.Name = name
+ out.Value = trimFirstSpace(chunk[min(colonPos+1, l):])
+ return true
+ } else if chunk == "" {
+ // scanSegment is called only with chunks which end with a newline in the input.
+ // If chunk is empty, it means that this is a blank line which ends the event,
+ // so an empty Field needs to be returned.
+ out.Name = ""
+ out.Value = ""
+ return true
+ } else if colonPos == 0 && f.keepComments {
+ out.Name = FieldNameComment
+ out.Value = trimFirstSpace(chunk[min(1, l):])
+ return true
+ }
+
+ return false
+}
+
+// ErrUnexpectedEOF is returned when the input is completely parsed but no complete field was found at the end.
+var ErrUnexpectedEOF = errors.New("go-sse: unexpected end of input")
+
+// Next parses the next available field in the remaining buffer.
+// It returns false if there are no more fields to parse.
+func (f *FieldParser) Next(r *Field) bool {
+ for f.data != "" {
+ f.started = true
+
+ chunk, rem, hasNewline := NextChunk(f.data)
+ if !hasNewline {
+ f.err = ErrUnexpectedEOF
+ return false
+ }
+
+ f.data = rem
+
+ if !f.scanSegment(chunk, r) {
+ continue
+ }
+
+ return true
+ }
+
+ return false
+}
+
+// Reset changes the buffer from which fields are parsed.
+func (f *FieldParser) Reset(data string) {
+ f.data = data
+ f.err = nil
+ f.started = false
+ f.doRemoveBOM()
+}
+
+// Err returns the last error encountered by the parser. It is either nil or ErrUnexpectedEOF.
+func (f *FieldParser) Err() error {
+ return f.err
+}
+
+// Started tells whether parsing has started (a call to Next which consumed input was made
+// or the BOM was removed, if it existed). Started will be true if the FieldParser has advanced
+// through the data.
+func (f *FieldParser) Started() bool {
+ return f.started
+}
+
+// KeepComments configures the FieldParser to parse/ignore comment fields.
+// By default comment fields are ignored.
+func (f *FieldParser) KeepComments(shouldKeep bool) {
+ f.keepComments = shouldKeep
+}
+
+// RemoveBOM configures the FieldParser to try and remove the Unicode BOM
+// when parsing the first field, if it exists.
+// If, at the time this option is set, the input is untouched (no fields were parsed),
+// it will also be attempted to remove the BOM.
+func (f *FieldParser) RemoveBOM(shouldRemove bool) {
+ f.removeBOM = shouldRemove
+ f.doRemoveBOM()
+}
+
+func (f *FieldParser) doRemoveBOM() {
+ const bom = "\xEF\xBB\xBF"
+ if f.removeBOM && !f.started && strings.HasPrefix(f.data, bom) {
+ f.data = f.data[len(bom):]
+ f.started = true
+ }
+}
+
+// NewFieldParser creates a parser that extracts fields from the given string.
+func NewFieldParser(data string) *FieldParser {
+ return &FieldParser{data: data}
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/internal/parser/parser.go b/vendor/github.com/tmaxmax/go-sse/internal/parser/parser.go
new file mode 100644
index 0000000000..8ebf28cafb
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/internal/parser/parser.go
@@ -0,0 +1,117 @@
+package parser
+
+import (
+ "bufio"
+ "io"
+ "unsafe"
+)
+
+// splitFunc is a split function for a bufio.Scanner that splits a sequence of
+// bytes into SSE events. Each event ends with two consecutive newline sequences,
+// where a newline sequence is defined as either "\n", "\r", or "\r\n".
+func splitFunc(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if len(data) == 0 {
+ return 0, nil, nil
+ }
+
+ var start int
+ for {
+ index, endlineLen := NewlineIndex(unsafe.String(unsafe.SliceData(data), len(data))[advance:])
+ advance += index + endlineLen
+ if index == 0 {
+ // If it was a blank line, skip it.
+ start += endlineLen
+ }
+ // We've reached the end of data or a second newline follows and the line isn't blank.
+ // The latter means we have an event.
+ if advance == len(data) || (isNewlineChar(data[advance]) && index > 0) {
+ break
+ }
+ }
+
+ if l := len(data); advance == l && !atEOF {
+ // We have reached the end of the buffer but have not yet seen two consecutive
+ // newline sequences, so we request more data.
+ return 0, nil, nil
+ } else if advance < l {
+ // We have found a newline. Consume the end-of-line sequence.
+ advance++
+ // Consume one more character if end-of-line is "\r\n".
+ if advance < l && data[advance-1] == '\r' && data[advance] == '\n' {
+ advance++
+ }
+ }
+
+ token = data[start:advance]
+
+ return advance, token, nil
+}
+
+// Parser extracts fields from a reader. Reading is buffered using a bufio.Scanner.
+// The Parser also removes the UTF-8 BOM if it exists.
+type Parser struct {
+ inputScanner *bufio.Scanner
+ fieldScanner *FieldParser
+}
+
+// Next parses a single field from the reader. It returns false when there are no more fields to parse.
+func (r *Parser) Next(f *Field) bool {
+ if !r.fieldScanner.Next(f) {
+ if !r.inputScanner.Scan() {
+ // Do this to signal EOF, which bufio.Scanner suppresses.
+ if r.inputScanner.Err() == nil {
+ r.inputScanner = nil
+ }
+ return false
+ }
+
+ if r.fieldScanner.Started() {
+ // If scanning was started, then an event was already processed at this point and the BOM was
+ // already removed, if it existed. We don't need to remove it anymore, so disable the option.
+ r.fieldScanner.RemoveBOM(false)
+ }
+
+ // The allocation made inside `Text` is not an issue and should even improve performance.
+ // If the Field returned from `Next` wouldn't own its resources, then the caller would have
+ // to allocate new memory and copy each field value. This way, not only the caller doesn't
+ // have to worry about allocations and ownership, but also bigger and less frequent allocations
+ // are made, compared to the previous usage – allocations are now made per event, not per field value.
+ r.fieldScanner.Reset(r.inputScanner.Text())
+
+ return r.fieldScanner.Next(f)
+ }
+
+ return true
+}
+
+// Err returns the last read error. At the end of input
+// it will always be equal to io.EOF.
+func (r *Parser) Err() error {
+ if err := r.fieldScanner.Err(); err != nil {
+ return err
+ }
+ if r.inputScanner == nil {
+ // Recover the EOF suppressed by bufio.Scanner.
+ // We need it inside the client, to know when to retry.
+ return io.EOF
+ }
+ return r.inputScanner.Err()
+}
+
+// Buffer sets the buffer used to scan the input.
+// For more information, see the documentation on bufio.Scanner.Buffer.
+// Do not call this after parsing has started – the method will panic!
+func (r *Parser) Buffer(buf []byte, maxSize int) {
+ r.inputScanner.Buffer(buf, maxSize)
+}
+
+// New returns a Parser that extracts fields from a reader.
+func New(r io.Reader) *Parser {
+ sc := bufio.NewScanner(r)
+ sc.Split(splitFunc)
+
+ fsc := NewFieldParser("")
+ fsc.RemoveBOM(true)
+
+ return &Parser{inputScanner: sc, fieldScanner: fsc}
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/joe.go b/vendor/github.com/tmaxmax/go-sse/joe.go
new file mode 100644
index 0000000000..d693774d96
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/joe.go
@@ -0,0 +1,327 @@
+package sse
+
+import (
+ "context"
+ "runtime/debug"
+ "sync"
+)
+
+// A Replayer is a type that can replay older published events to new subscribers.
+// Replayers use event IDs, the topics the events were published and optionally
+// any other criteria to determine which are valid for replay.
+//
+// While replayers can require events to have IDs beforehand, they can also set the IDs themselves,
+// automatically - it's up to the implementation. Replayers should not overwrite or remove any existing
+// IDs and return an error instead.
+//
+// Replayers are not required to be thread-safe - server providers are required to ensure only
+// one operation is executed on the replayer at any given time. Server providers may not execute
+// replay operation concurrently with other operations, so make sure any action on the replayer
+// blocks for as little as possible. If a replayer is thread-safe, some operations may be
+// run in a separate goroutine - see the interface's method documentation.
+//
+// Executing actions that require waiting for a long time on I/O, such as HTTP requests or database
+// calls must be handled with great care, so the server provider is not blocked. Reducing them to
+// the minimum by using techniques such as caching or by executing them in separate goroutines is
+// recommended, as long as the implementation fulfills the requirements.
+//
+// If not specified otherwise, the errors returned are implementation-specific.
+type Replayer interface {
+ // Put adds a new event to the replay buffer. The Message that is returned may not have the
+ // same address, if the replayer automatically sets IDs.
+ //
+ // Put errors if the message couldn't be queued – if no topics are provided,
+ // a message without an ID is put into a Replayer which does not
+ // automatically set IDs, or a message with an ID is put into a Replayer which
+ // does automatically set IDs. An error should be returned for other failures
+ // related to the given message. When no topics are provided, ErrNoTopic should be
+ // returned.
+ //
+ // The Put operation may be executed by the replayer in another goroutine only if
+ // it can ensure that any Replay operation called after the Put goroutine is started
+ // can replay the new received message. This also requires the replayer implementation
+ // to be thread-safe.
+ //
+ // Replayers are not required to guarantee that immediately after Put returns
+ // the new messages can be replayed. If an error occurs internally when putting the new message
+ // and retrying the operation would block for too long, it can be aborted.
+ //
+ // To indicate a complete replayer failure (i.e. the replayer won't work after this point)
+ // a panic should be used instead of an error.
+ Put(message *Message, topics []string) (*Message, error)
+ // Replay sends to a new subscriber all the valid events received by the replayer
+ // since the event with the listener's ID. If the ID the listener provides
+ // is invalid, the provider should not replay any events.
+ //
+ // Replay calls must return only after replaying is done.
+ // Implementations should not keep references to the subscription client
+ // after Replay returns.
+ //
+ // If an error is returned, then at least some messages weren't successfully replayed.
+ // The error is nil if there were no messages to replay for the particular subscription
+ // or if all messages were replayed successfully.
+ //
+ // If any messages are replayed, Client.Flush must be called by implementations.
+ Replay(subscription Subscription) error
+}
+
+type (
+ subscriber chan<- error
+ subscription struct {
+ done subscriber
+ Subscription
+ }
+
+ messageWithTopics struct {
+ message *Message
+ topics []string
+ }
+
+ publishedMessage struct {
+ replayerErr chan<- error
+ messageWithTopics
+ }
+)
+
+// Joe is a basic server provider that synchronously executes operations by queueing them in channels.
+// Events are also sent synchronously to subscribers, so if a subscriber's callback blocks, the others
+// have to wait.
+//
+// Joe optionally supports event replaying with the help of a Replayer.
+//
+// If the replayer panics, the subscription for which it panicked is considered failed
+// and an error is returned, and thereafter the replayer is not used anymore – no replays
+// will be attempted for future subscriptions.
+// If due to some other unexpected scenario something panics internally, Joe will remove all subscribers
+// and close itself, so subscribers don't end up blocked.
+//
+// He serves simple use-cases well, as he's light on resources, and does not require any external
+// services. Also, he is the default provider for Servers.
+type Joe struct {
+ message chan publishedMessage
+ subscription chan subscription
+ unsubscription chan subscriber
+ done chan struct{}
+ closed chan struct{}
+ subscribers map[subscriber]Subscription
+
+ // An optional replayer that Joe uses to resend older messages to new subscribers.
+ Replayer Replayer
+
+ initDone sync.Once
+}
+
+// Subscribe tells Joe to send new messages to this subscriber. The subscription
+// is automatically removed when the context is done, a client error occurs
+// or Joe is stopped.
+//
+// Subscribe returns without error only when the unsubscription is caused
+// by the given context being canceled.
+func (j *Joe) Subscribe(ctx context.Context, sub Subscription) error {
+ j.init()
+
+ // Without a buffered channel we risk a deadlock when Subscribe
+ // stops receiving from this channel on done context and Joe
+ // encounters an error when sending messages or replaying.
+ done := make(chan error, 1)
+
+ select {
+ case <-j.done:
+ return ErrProviderClosed
+ case j.subscription <- subscription{done: done, Subscription: sub}:
+ }
+
+ select {
+ case err := <-done:
+ return err
+ case <-j.closed:
+ return ErrProviderClosed
+ case <-ctx.Done():
+ }
+
+ select {
+ case <-j.done:
+ return ErrProviderClosed
+ case j.unsubscription <- done:
+ // NOTE(tmaxmax): should we return ctx.Err() instead?
+ return nil
+ }
+}
+
+// Publish tells Joe to send the given message to the subscribers.
+// When a message is published to multiple topics, Joe makes sure to
+// not send the Message multiple times to clients that are subscribed
+// to more than one topic that receive the given Message. Every client
+// receives each unique message once, regardless of how many topics it
+// is subscribed to or to how many topics the message is published.
+//
+// It returns ErrNoTopic if no topics are provided, eventual Replayer.Put
+// errors or ErrProviderClosed. If the replayer returns an error the
+// message will still be sent but most probably it won't be replayed to
+// new subscribers, depending on how the error is handled by the replay provider.
+func (j *Joe) Publish(msg *Message, topics []string) error {
+ if len(topics) == 0 {
+ return ErrNoTopic
+ }
+
+ j.init()
+
+ // Buffered to prevent a deadlock when Publish doesn't
+ // receive from errs due to Joe being shut down and the
+ // message published causes an error after the shutdown.
+ errs := make(chan error, 1)
+
+ pub := publishedMessage{replayerErr: errs}
+ pub.message = msg
+ pub.topics = topics
+
+ // Waiting on done ensures Publish doesn't block the caller goroutine
+ // when Joe is stopped and implements the required Provider behavior.
+ select {
+ case j.message <- pub:
+ return <-errs
+ case <-j.done:
+ return ErrProviderClosed
+ }
+}
+
+// Shutdown signals Joe to close all subscribers and stop receiving messages.
+// It returns when all the subscribers are closed.
+//
+// Further calls to Stop will return ErrProviderClosed.
+func (j *Joe) Shutdown(ctx context.Context) (err error) {
+ j.init()
+
+ defer func() {
+ if r := recover(); r != nil {
+ err = ErrProviderClosed
+ }
+ }()
+
+ close(j.done)
+
+ select {
+ case <-j.closed:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+
+ return
+}
+
+func (j *Joe) removeSubscriber(sub subscriber) {
+ l := len(j.subscribers)
+ delete(j.subscribers, sub)
+ // We check that an element was deleted as removeSubscriber is called twice
+ // in the following edge case: the subscriber context is done before a
+ // published message is sent/flushed, and the send/flush returns an error.
+ if l != len(j.subscribers) {
+ close(sub)
+ }
+}
+
+func (j *Joe) start(replay Replayer) {
+ defer close(j.closed)
+
+ for {
+ select {
+ case msg := <-j.message:
+ if replay != nil {
+ m, err := tryPut(msg.messageWithTopics, &replay)
+ if _, isPanic := err.(replayPanic); err != nil && !isPanic { //nolint:errorlint // it's our error
+ // NOTE(tmaxmax): We could return panic errors here but we'd have to expose
+ // the error type in order for this error to be handled. Let's not change
+ // the public errors for now. See also the other note below.
+ msg.replayerErr <- err
+ } else if m != nil {
+ msg.message = m
+ }
+ }
+ close(msg.replayerErr)
+
+ for done, sub := range j.subscribers {
+ if topicsIntersect(sub.Topics, msg.topics) {
+ err := sub.Client.Send(msg.message)
+ if err == nil {
+ err = sub.Client.Flush()
+ }
+
+ if err != nil {
+ done <- err
+ // Technically it would be possible to just send the error,
+ // as Subscribe would send an unsubscription signal. The problem
+ // is that if the j.message channel is ready together with j.unsubscription
+ // and j.message is picked we might send again to this now unsubscribed
+ // subscriber, which will cause issues (e.g. deadlock on done).
+ // This line here is the reason why we need to verify we actually
+ // have this subscriber in removeSubscriber above.
+ j.removeSubscriber(done)
+ }
+ }
+ }
+ case sub := <-j.subscription:
+ var err error
+ if replay != nil {
+ err = tryReplay(sub.Subscription, &replay)
+ }
+
+ // NOTE(tmaxmax): We can't meaningfully handle replay panics in any way
+ // other than disabling replay altogether. This ensures uptime
+	// in the face of the unexpected – returning the panic as an error
+ // to the subscriber doesn't make sense, as it's probably not the subscriber's fault.
+ if _, isPanic := err.(replayPanic); err != nil && !isPanic { //nolint:errorlint // it's our error
+ sub.done <- err
+ close(sub.done)
+ } else {
+ j.subscribers[sub.done] = sub.Subscription
+ }
+ case sub := <-j.unsubscription:
+ j.removeSubscriber(sub)
+ case <-j.done:
+ return
+ }
+ }
+}
+
+func tryReplay(sub Subscription, replay *Replayer) (err error) { //nolint:gocritic // intended
+ defer handleReplayerPanic(replay, &err)
+
+ return (*replay).Replay(sub)
+}
+
+func tryPut(msg messageWithTopics, replay *Replayer) (m *Message, err error) { //nolint:gocritic // intended
+ defer handleReplayerPanic(replay, &err)
+
+ return (*replay).Put(msg.message, msg.topics)
+}
+
+type replayPanic struct{}
+
+func (replayPanic) Error() string { return "replay provider panicked" }
+
+func handleReplayerPanic(replay *Replayer, errp *error) { //nolint:gocritic // intended
+ if r := recover(); r != nil {
+ *replay = nil
+ *errp = replayPanic{}
+ // NOTE(tmaxmax): At least print a stacktrace. It's annoying when libraries recover from panics
+ // and make them untraceable. Should we provide a way to handle these in a custom manner?
+ debug.PrintStack()
+ }
+}
+
+func (j *Joe) init() {
+ j.initDone.Do(func() {
+ j.message = make(chan publishedMessage)
+ j.subscription = make(chan subscription)
+ j.unsubscription = make(chan subscriber)
+ j.done = make(chan struct{})
+ j.closed = make(chan struct{})
+ j.subscribers = map[subscriber]Subscription{}
+
+ replay := j.Replayer
+ if replay == nil {
+ replay = noopReplayer{}
+ }
+ go j.start(replay)
+ })
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/message.go b/vendor/github.com/tmaxmax/go-sse/message.go
new file mode 100644
index 0000000000..c39b190f5c
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/message.go
@@ -0,0 +1,354 @@
+package sse
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+ "unsafe"
+
+ "github.com/tmaxmax/go-sse/internal/parser"
+)
+
+func isSingleLine(p string) bool {
+ _, newlineLen := parser.NewlineIndex(p)
+ return newlineLen == 0
+}
+
+// fieldBytes holds the byte representation of each field type along with a colon at the end.
+var (
+ fieldBytesData = []byte(parser.FieldNameData + ": ")
+ fieldBytesEvent = []byte(parser.FieldNameEvent + ": ")
+ fieldBytesRetry = []byte(parser.FieldNameRetry + ": ")
+ fieldBytesID = []byte(parser.FieldNameID + ": ")
+ fieldBytesComment = []byte(": ")
+)
+
+type chunk struct {
+ content string
+ isComment bool
+}
+
+var newline = []byte{'\n'}
+
+func (c *chunk) WriteTo(w io.Writer) (int64, error) {
+ name := fieldBytesData
+ if c.isComment {
+ name = fieldBytesComment
+ }
+ n, err := w.Write(name)
+ if err != nil {
+ return int64(n), err
+ }
+ m, err := writeString(w, c.content)
+ n += m
+ if err != nil {
+ return int64(n), err
+ }
+ m, err = w.Write(newline)
+ return int64(n + m), err
+}
+
+// Message is the representation of an event sent from the server to its clients.
+type Message struct {
+ chunks []chunk
+
+ ID EventID
+ Type EventType
+ Retry time.Duration
+}
+
+func (e *Message) appendText(isComment bool, chunks ...string) {
+ for _, c := range chunks {
+ var content string
+
+ for c != "" {
+ content, c, _ = parser.NextChunk(c)
+ e.chunks = append(e.chunks, chunk{content: content, isComment: isComment})
+ }
+ }
+}
+
+// AppendData adds multiple data fields on the message's event from the given strings.
+// Each string will be a distinct data field, and if the strings themselves span multiple lines
+// they will be broken into multiple fields.
+//
+// Server-sent events are not suited for binary data: the event fields are delimited by newlines,
+// where a newline can be a LF, CR or CRLF sequence. When the client interprets the fields,
+// it joins multiple data fields using LF, so information is altered. Here's an example:
+//
+// initial payload: This is a\r\nmultiline\rtext.\nIt has multiple\nnewline\r\nvariations.
+// data sent over the wire:
+// data: This is a
+// data: multiline
+// data: text.
+// data: It has multiple
+// data: newline
+// data: variations
+// data received by client: This is a\nmultiline\ntext.\nIt has multiple\nnewline\nvariations.
+//
+// Each line prepended with "data:" is a field; multiple data fields are joined together using LF as the delimiter.
+// If you attempted to send the same payload without prepending the "data:" prefix, like so:
+//
+// data: This is a
+// multiline
+// text.
+// It has multiple
+// newline
+// variations
+//
+// there would be only one data field (the first one). The rest would be different fields, named "multiline", "text.",
+// "It has multiple" etc., which are invalid fields according to the protocol.
+//
+// Besides, the protocol explicitly states that event streams must always be UTF-8 encoded:
+// https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream.
+//
+// If you need to send binary data, you can use a Base64 encoder or any other encoder that does not output
+// any newline characters (\r or \n) and then append the resulted data.
+//
+// Given that clients treat all newlines the same and replace the original newlines with LF,
+// for internal code simplicity AppendData replaces them as well.
+func (e *Message) AppendData(chunks ...string) {
+ e.appendText(false, chunks...)
+}
+
+// AppendComment adds comment fields to the message's event.
+// If the comments span multiple lines, they are broken into multiple comment fields.
+func (e *Message) AppendComment(comments ...string) {
+ e.appendText(true, comments...)
+}
+
+func (e *Message) writeMessageField(w io.Writer, f messageField, fieldBytes []byte) (int64, error) {
+ if !f.IsSet() {
+ return 0, nil
+ }
+
+ n, err := w.Write(fieldBytes)
+ if err != nil {
+ return int64(n), err
+ }
+ m, err := writeString(w, f.String())
+ n += m
+ if err != nil {
+ return int64(n), err
+ }
+ m, err = w.Write(newline)
+ return int64(n + m), err
+}
+
+func (e *Message) writeID(w io.Writer) (int64, error) {
+ return e.writeMessageField(w, e.ID.messageField, fieldBytesID)
+}
+
+func (e *Message) writeType(w io.Writer) (int64, error) {
+ return e.writeMessageField(w, e.Type.messageField, fieldBytesEvent)
+}
+
+func (e *Message) writeRetry(w io.Writer) (int64, error) {
+ millis := e.Retry.Milliseconds()
+ if millis <= 0 {
+ return 0, nil
+ }
+
+ n, err := w.Write(fieldBytesRetry)
+ if err != nil {
+ return int64(n), err
+ }
+
+ var buf [13]byte // log10(INT64_MAX / 1e6) ~= 13
+
+ i := len(buf) - 1
+ for millis != 0 {
+ buf[i] = '0' + byte(millis%10)
+ i--
+ millis /= 10
+ }
+
+ m, err := w.Write(buf[i+1:])
+ n += m
+ if err != nil {
+ return int64(n), err
+ }
+ m, err = w.Write(newline)
+ return int64(n + m), err
+}
+
+// WriteTo writes the standard textual representation of the message's event to an io.Writer.
+// This operation is heavily optimized, so it is strongly preferred over MarshalText or String.
+func (e *Message) WriteTo(w io.Writer) (int64, error) {
+ n, err := e.writeID(w)
+ if err != nil {
+ return n, err
+ }
+ m, err := e.writeType(w)
+ n += m
+ if err != nil {
+ return n, err
+ }
+ m, err = e.writeRetry(w)
+ n += m
+ if err != nil {
+ return n, err
+ }
+ for i := range e.chunks {
+ m, err = e.chunks[i].WriteTo(w)
+ n += m
+ if err != nil {
+ return n, err
+ }
+ }
+ if n == 0 {
+ return 0, nil
+ }
+ o, err := w.Write(newline)
+ return int64(o) + n, err
+}
+
+// MarshalText writes the standard textual representation of the message's event. Marshalling and unmarshalling will
+// result in a message with an event that has the same fields; topic will be lost.
+//
+// If you want to preserve everything, create your own custom marshalling logic.
+// For an example using encoding/json, see the top-level MessageCustomJSONMarshal example.
+//
+// Use the WriteTo method if you don't need the byte representation.
+//
+// The representation is written to a bytes.Buffer, which means the error is always nil.
+// If the buffer grows to a size bigger than the maximum allowed, MarshalText will panic.
+// See the bytes.Buffer documentation for more info.
+func (e *Message) MarshalText() ([]byte, error) {
+ b := bytes.Buffer{}
+ _, err := e.WriteTo(&b)
+ return b.Bytes(), err
+}
+
+// String writes the message's event standard textual representation to a strings.Builder and returns the resulted string.
+// It may panic if the representation is too long to be buffered.
+//
+// Use the WriteTo method if you don't actually need the string representation.
+func (e *Message) String() string {
+ s := strings.Builder{}
+ _, _ = e.WriteTo(&s)
+ return s.String()
+}
+
+// UnmarshalError is the error returned by the Message's UnmarshalText method.
+// If the error is related to a specific field, FieldName will be a non-empty string.
+// If no fields were found in the target text or any other errors occurred, only
+// a Reason will be provided. Reason is always present.
+type UnmarshalError struct {
+ Reason error
+ FieldName string
+ // The value of the invalid field.
+ FieldValue string
+}
+
+func (u *UnmarshalError) Error() string {
+ if u.FieldName == "" {
+ return fmt.Sprintf("unmarshal event error: %s", u.Reason.Error())
+ }
+ return fmt.Sprintf("unmarshal event error, %s field invalid: %s. contents: %s", u.FieldName, u.Reason.Error(), u.FieldValue)
+}
+
+func (u *UnmarshalError) Unwrap() error {
+ return u.Reason
+}
+
+// ErrUnexpectedEOF is returned when unmarshaling a Message from an input that doesn't end in a newline.
+//
+// If it returned from a Connection, it means that the data from the server has reached EOF
+// in the middle of an incomplete event and retries are disabled (normally the client retries
+// the connection in this situation).
+var ErrUnexpectedEOF = parser.ErrUnexpectedEOF
+
+func (e *Message) reset() {
+ e.chunks = nil
+ e.Type = EventType{}
+ e.ID = EventID{}
+ e.Retry = 0
+}
+
+// UnmarshalText extracts the first event found in the given byte slice into the
+// receiver. The input is expected to be a wire format event, as defined by the spec.
+// Therefore, previous fields present on the Message will be overwritten
+// (i.e. event, ID, comments, data, retry).
+//
+// Unmarshaling ignores fields with invalid names. If no valid fields are found,
+// an error is returned. For a field to be valid it must end in a newline - if the last
+// field of the event doesn't end in one, an error is returned.
+//
+// All returned errors are of type UnmarshalError.
+func (e *Message) UnmarshalText(p []byte) error {
+ e.reset()
+
+ s := parser.NewFieldParser(string(p))
+ s.KeepComments(true)
+ s.RemoveBOM(true)
+
+loop:
+ for f := (parser.Field{}); s.Next(&f); {
+ switch f.Name {
+ case parser.FieldNameRetry:
+ if i := strings.IndexFunc(f.Value, func(r rune) bool {
+ return r < '0' || r > '9'
+ }); i != -1 {
+ r, _ := utf8.DecodeRuneInString(f.Value[i:])
+
+ return &UnmarshalError{
+ FieldName: string(f.Name),
+ FieldValue: f.Value,
+ Reason: fmt.Errorf("contains character %q, which is not an ASCII digit", r),
+ }
+ }
+
+ milli, err := strconv.ParseInt(f.Value, 10, 64)
+ if err != nil {
+ return &UnmarshalError{
+ FieldName: string(f.Name),
+ FieldValue: f.Value,
+ Reason: fmt.Errorf("invalid retry value: %w", err),
+ }
+ }
+
+ e.Retry = time.Duration(milli) * time.Millisecond
+ case parser.FieldNameData, parser.FieldNameComment:
+ e.chunks = append(e.chunks, chunk{content: f.Value, isComment: f.Name == parser.FieldNameComment})
+ case parser.FieldNameEvent:
+ e.Type.value = f.Value
+ e.Type.set = true
+ case parser.FieldNameID:
+ if strings.IndexByte(f.Value, 0) != -1 {
+ break
+ }
+
+ e.ID.value = f.Value
+ e.ID.set = true
+ default: // event end
+ break loop
+ }
+ }
+
+ if len(e.chunks) == 0 && !e.Type.IsSet() && e.Retry == 0 && !e.ID.IsSet() || s.Err() != nil {
+ e.reset()
+ return &UnmarshalError{Reason: ErrUnexpectedEOF}
+ }
+ return nil
+}
+
+// Clone returns a copy of the message.
+func (e *Message) Clone() *Message {
+ return &Message{
+ // The first AppendData will trigger a reallocation.
+ // Already appended chunks cannot be modified/removed, so this is safe.
+ chunks: e.chunks[:len(e.chunks):len(e.chunks)],
+ Retry: e.Retry,
+ Type: e.Type,
+ ID: e.ID,
+ }
+}
+
+func writeString(w io.Writer, s string) (int, error) {
+ return w.Write(unsafe.Slice(unsafe.StringData(s), len(s)))
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/message_fields.go b/vendor/github.com/tmaxmax/go-sse/message_fields.go
new file mode 100644
index 0000000000..4fb60420ff
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/message_fields.go
@@ -0,0 +1,181 @@
+package sse
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+)
+
+// EventID is a value of the "id" field.
+// It must have a single line.
+type EventID struct {
+ messageField
+}
+
+// NewID creates an event ID value. A valid ID must not have any newlines.
+// If the input is not valid, an unset (invalid) ID is returned.
+func NewID(value string) (EventID, error) {
+ f, err := newMessageField(value)
+ if err != nil {
+ return EventID{}, fmt.Errorf("invalid event ID: %w", err)
+ }
+
+ return EventID{f}, nil
+}
+
+// ID creates an event ID and assumes it is valid.
+// If it is not valid, it panics.
+func ID(value string) EventID {
+ return must(NewID(value))
+}
+
+// EventType is a value of the "event" field.
+// It must have a single line.
+type EventType struct {
+ messageField
+}
+
+// NewType creates a value for the "event" field.
+// It is valid if it does not have any newlines.
+// If the input is not valid, an unset (invalid) ID is returned.
+func NewType(value string) (EventType, error) {
+ f, err := newMessageField(value)
+ if err != nil {
+ return EventType{}, fmt.Errorf("invalid event type: %w", err)
+ }
+
+ return EventType{f}, nil
+}
+
+// Type creates an EventType and assumes it is valid.
+// If it is not valid, it panics.
+func Type(value string) EventType {
+ return must(NewType(value))
+}
+
+func must[T any](v T, err error) T {
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// The messageField struct represents any valid field value
+// i.e. single line strings.
+// Must be passed by value and are comparable.
+type messageField struct {
+ value string
+ set bool
+}
+
+func newMessageField(value string) (messageField, error) {
+ if !isSingleLine(value) {
+ return messageField{}, errors.New("input is multiline")
+ }
+ return messageField{value: value, set: true}, nil
+}
+
+// IsSet returns true if the receiver is a valid (set) value.
+func (i messageField) IsSet() bool {
+ return i.set
+}
+
+// String returns the underlying value. The value may be an empty string,
+// make sure to check if the value is set before using it.
+func (i messageField) String() string {
+ return i.value
+}
+
+// UnmarshalText sets the underlying value to the given string, if valid.
+// If the input is invalid, no changes are made to the receiver.
+func (i *messageField) UnmarshalText(data []byte) error {
+ *i = messageField{}
+
+ id, err := newMessageField(string(data))
+ if err != nil {
+ return err
+ }
+
+ *i = id
+
+ return nil
+}
+
+// UnmarshalJSON sets the underlying value to the given JSON value
+// if the value is a string. The previous value is discarded if the operation fails.
+func (i *messageField) UnmarshalJSON(data []byte) error {
+ *i = messageField{}
+
+ if string(data) == "null" {
+ return nil
+ }
+
+ var input string
+
+ if err := json.Unmarshal(data, &input); err != nil {
+ return err
+ }
+
+ id, err := newMessageField(input)
+ if err != nil {
+ return err
+ }
+
+ *i = id
+
+ return nil
+}
+
+// MarshalText returns a copy of the underlying value if it is set.
+// It returns an error when trying to marshal an unset value.
+func (i *messageField) MarshalText() ([]byte, error) {
+ if i.IsSet() {
+ return []byte(i.String()), nil
+ }
+
+ return nil, fmt.Errorf("can't marshal unset string to text")
+}
+
+// MarshalJSON returns a JSON representation of the underlying value if it is set.
+// It otherwise returns the representation of the JSON null value.
+func (i *messageField) MarshalJSON() ([]byte, error) {
+ if i.IsSet() {
+ return json.Marshal(i.String())
+ }
+
+ return json.Marshal(nil)
+}
+
+// Scan implements the sql.Scanner interface. Values can be scanned from:
+// - nil interfaces (result: unset value)
+// - byte slice
+// - string
+func (i *messageField) Scan(src interface{}) error {
+ *i = messageField{}
+
+ if src == nil {
+ return nil
+ }
+
+ switch v := src.(type) {
+ case []byte:
+ i.value = string(v)
+ case string:
+ i.value = string([]byte(v))
+ default:
+ return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, *i)
+ }
+
+ i.set = true
+
+ return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (i messageField) Value() (driver.Value, error) {
+ if i.IsSet() {
+ return i.String(), nil
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/replay.go b/vendor/github.com/tmaxmax/go-sse/replay.go
new file mode 100644
index 0000000000..6cbe015462
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/replay.go
@@ -0,0 +1,388 @@
+package sse
+
+import (
+ "errors"
+ "strconv"
+ "time"
+)
+
+// NewFiniteReplayer creates a finite replay provider with the given max
+// count and auto ID behaviour.
+//
+// Count is the maximum number of events FiniteReplayer should hold as
+// valid. It must be greater than zero.
+//
+// AutoIDs configures FiniteReplayer to automatically set the IDs of
+// events.
+func NewFiniteReplayer(
+ count int, autoIDs bool,
+) (*FiniteReplayer, error) {
+ if count < 2 {
+ return nil, errors.New("count must be at least 2")
+ }
+
+ r := &FiniteReplayer{}
+ r.buf.buf = make([]messageWithTopics, count)
+ if autoIDs {
+ r.currentID = new(uint64)
+ }
+
+ return r, nil
+}
+
+// FiniteReplayer is a replayer that replays at maximum a certain number of events.
+// The events must have an ID unless the replayer is configured to set IDs automatically.
+type FiniteReplayer struct {
+ currentID *uint64
+ buf queue[messageWithTopics]
+}
+
+// Put puts a message into the replayer's buffer. If there are more messages than the maximum
+// number, the oldest message is removed.
+func (f *FiniteReplayer) Put(message *Message, topics []string) (*Message, error) {
+ if len(topics) == 0 {
+ return nil, ErrNoTopic
+ }
+
+ message, err := ensureID(message, f.currentID)
+ if err != nil {
+ return nil, err
+ }
+
+ f.buf.enqueue(messageWithTopics{message: message, topics: topics})
+
+ return message, nil
+}
+
+// Replay replays the stored messages to the listener.
+func (f *FiniteReplayer) Replay(subscription Subscription) error {
+ i := findIDInQueue(&f.buf, subscription.LastEventID, f.currentID != nil)
+ if i < 0 {
+ return nil
+ }
+
+ var err error
+ f.buf.each(i)(func(_ int, m messageWithTopics) bool {
+ if topicsIntersect(subscription.Topics, m.topics) {
+ if err = subscription.Client.Send(m.message); err != nil {
+ return false
+ }
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ return subscription.Client.Flush()
+}
+
+// ValidReplayer is a Replayer that replays all the buffered non-expired events.
+//
+// The replayer removes any expired events when a new event is put and after at least
+// a GCInterval period passed.
+//
+// The events must have an ID unless the replayer is configured to set IDs automatically.
+type ValidReplayer struct {
+ lastGC time.Time
+
+ // The function used to retrieve the current time. Defaults to time.Now.
+ // Useful when testing.
+ Now func() time.Time
+
+ currentID *uint64
+ messages queue[messageWithTopicsAndExpiry]
+
+ ttl time.Duration
+ // After how long the replayer should attempt to clean up expired events.
+ // By default cleanup is done after a fourth of the TTL has passed; this means
+ // that messages may be stored for a duration equal to 5/4*TTL. If this is not
+ // desired, set the GC interval to a value sensible for your use case or set
+ // it to 0 – this disables automatic cleanup, enabling you to do it manually
+ // using the GC method.
+ GCInterval time.Duration
+}
+
+// NewValidReplayer creates a ValidReplayer with the given message
+// lifetime duration (time-to-live) and auto ID behavior.
+//
+// The TTL must be a positive duration. It is technically possible to use a very
+// big duration in order to store and replay every message put for the lifetime
+// of the program; this is not recommended, as memory usage becomes effectively
+// unbounded which might lead to a crash.
+func NewValidReplayer(ttl time.Duration, autoIDs bool) (*ValidReplayer, error) {
+ if ttl <= 0 {
+ return nil, errors.New("event TTL must be greater than zero")
+ }
+
+ r := &ValidReplayer{
+ Now: time.Now,
+ GCInterval: ttl / 4,
+ ttl: ttl,
+ }
+
+ if autoIDs {
+ r.currentID = new(uint64)
+ }
+
+ return r, nil
+}
+
+// Put puts the message into the replayer's buffer.
+func (v *ValidReplayer) Put(message *Message, topics []string) (*Message, error) {
+ if len(topics) == 0 {
+ return nil, ErrNoTopic
+ }
+
+ now := v.Now()
+ if v.lastGC.IsZero() {
+ v.lastGC = now
+ }
+
+ if v.shouldGC(now) {
+ v.doGC(now)
+ v.lastGC = now
+ }
+
+ message, err := ensureID(message, v.currentID)
+ if err != nil {
+ return nil, err
+ }
+
+ if v.messages.count == len(v.messages.buf) {
+ newCap := len(v.messages.buf) * 2
+ if minCap := 4; newCap < minCap {
+ newCap = minCap
+ }
+ v.messages.resize(newCap)
+ }
+
+ v.messages.enqueue(messageWithTopicsAndExpiry{messageWithTopics: messageWithTopics{message: message, topics: topics}, exp: now.Add(v.ttl)})
+
+ return message, nil
+}
+
+func (v *ValidReplayer) shouldGC(now time.Time) bool {
+ return v.GCInterval > 0 && now.Sub(v.lastGC) >= v.GCInterval
+}
+
+// GC removes all the expired messages from the replayer's buffer.
+func (v *ValidReplayer) GC() {
+ v.doGC(v.Now())
+}
+
+func (v *ValidReplayer) doGC(now time.Time) {
+ for v.messages.count > 0 {
+ e := v.messages.buf[v.messages.head]
+ if e.exp.After(now) {
+ break
+ }
+
+ v.messages.dequeue()
+ }
+
+ if v.messages.count <= len(v.messages.buf)/4 {
+ newCap := len(v.messages.buf) / 2
+ if minCap := 4; newCap < minCap {
+ newCap = minCap
+ }
+ v.messages.resize(newCap)
+ }
+}
+
+// Replay replays all the valid messages to the listener.
+func (v *ValidReplayer) Replay(subscription Subscription) error {
+ i := findIDInQueue(&v.messages, subscription.LastEventID, v.currentID != nil)
+ if i < 0 {
+ return nil
+ }
+
+ now := v.Now()
+
+ var err error
+ v.messages.each(i)(func(_ int, m messageWithTopicsAndExpiry) bool {
+ if m.exp.After(now) && topicsIntersect(subscription.Topics, m.topics) {
+ if err = subscription.Client.Send(m.message); err != nil {
+ return false
+ }
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ return subscription.Client.Flush()
+}
+
+// topicsIntersect returns true if the given topic slices have at least one topic in common.
+func topicsIntersect(a, b []string) bool {
+ for _, at := range a {
+ for _, bt := range b {
+ if at == bt {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+func ensureID(m *Message, currentID *uint64) (*Message, error) {
+ if currentID == nil {
+ if !m.ID.IsSet() {
+ return nil, errors.New("message has no ID")
+ }
+
+ return m, nil
+ }
+
+ if m.ID.IsSet() {
+ return nil, errors.New("message already has an ID, can't use generated ID")
+ }
+
+ m = m.Clone()
+ m.ID = ID(strconv.FormatUint(*currentID, 10))
+
+ (*currentID)++
+
+ return m, nil
+}
+
+type queue[T any] struct {
+ buf []T
+ head, tail, count int
+}
+
+func (q *queue[T]) each(startAt int) func(func(int, T) bool) {
+ return func(yield func(int, T) bool) {
+ if startAt < q.tail {
+ for i := startAt; i < q.tail; i++ {
+ if !yield(i, q.buf[i]) {
+ return
+ }
+ }
+ } else {
+ for i := startAt; i < len(q.buf); i++ {
+ if !yield(i, q.buf[i]) {
+ return
+ }
+ }
+ for i := 0; i < q.tail; i++ {
+ if !yield(i, q.buf[i]) {
+ return
+ }
+ }
+ }
+ }
+}
+
+func (q *queue[T]) enqueue(v T) {
+ q.buf[q.tail] = v
+
+ q.tail++
+
+ overwritten := false
+ if q.tail > q.head && q.count == len(q.buf) {
+ q.head = q.tail
+ overwritten = true
+ } else {
+ q.count++
+ }
+
+ if q.tail == len(q.buf) {
+ q.tail = 0
+ if overwritten {
+ q.head = 0
+ }
+ }
+}
+
+func (q *queue[T]) dequeue() {
+ q.buf[q.head] = *new(T)
+
+ q.head++
+ if q.head == len(q.buf) {
+ q.head = 0
+ }
+
+ q.count--
+}
+
+func (q *queue[T]) resize(newSize int) {
+ buf := make([]T, newSize)
+ if q.head < q.tail {
+ copy(buf, q.buf[q.head:q.tail])
+ } else {
+ n := copy(buf, q.buf[q.head:])
+ copy(buf[n:], q.buf[:q.tail])
+ }
+
+ q.head = 0
+ q.tail = q.count
+ q.buf = buf
+}
+
+func findIDInQueue[M interface{ ID() EventID }](q *queue[M], id EventID, autoID bool) int {
+ if q.count == 0 {
+ return -1
+ }
+
+ if autoID {
+ id, err := strconv.ParseUint(id.String(), 10, 64)
+ if err != nil {
+ return -1
+ }
+
+ firstID, _ := strconv.ParseUint(q.buf[q.head].ID().String(), 10, 64)
+
+ pos := -1
+ if delta := id - firstID; id >= firstID {
+ if delta >= uint64(q.count) { //nolint:gosec // int always positive
+ return -1
+ }
+ pos = int(delta) //nolint:gosec // delta < q.count, which is an int
+ }
+
+ i := pos + q.head + 1
+ if i >= len(q.buf) {
+ i -= len(q.buf)
+ }
+
+ return i
+ }
+
+ i := -1
+ q.each(q.head)(func(j int, m M) bool {
+ if m.ID() == id {
+ i = j
+ return false
+ }
+ return true
+ })
+
+ if i != -1 {
+ i++
+ if i == len(q.buf) {
+ i = 0
+ } else if i == q.tail {
+ i = -1
+ }
+ }
+
+ return i
+}
+
+func (m messageWithTopics) ID() EventID { return m.message.ID }
+
+type messageWithTopicsAndExpiry struct {
+ exp time.Time
+ messageWithTopics
+}
+
+// noopReplayer is the default replay provider used if none is given. It does nothing.
+// It is used to avoid nil checks for the provider each time it is used.
+type noopReplayer struct{}
+
+func (n noopReplayer) Put(m *Message, _ []string) (*Message, error) { return m, nil }
+func (n noopReplayer) Replay(_ Subscription) error { return nil }
diff --git a/vendor/github.com/tmaxmax/go-sse/server.go b/vendor/github.com/tmaxmax/go-sse/server.go
new file mode 100644
index 0000000000..7e27a58a08
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/server.go
@@ -0,0 +1,233 @@
+/*
+Package sse provides utilities for creating and consuming fully spec-compliant HTML5 server-sent events streams.
+
+The central piece of a server's implementation is the Provider interface. A Provider describes a publish-subscribe
+system that can be used to implement messaging for the SSE protocol. This package already has an
+implementation, called Joe, that is the default provider for any server. Abstracting the messaging
+system implementation away allows servers to use any arbitrary provider under the same interface.
+The default provider will work for simple use-cases, but where scalability is required, one will
+look at a more suitable solution. Adapters that satisfy the Provider interface can easily be created,
+and then plugged into the server instance.
+Events themselves are represented using the Message type.
+
+On the client-side, we use the Client struct to create connections to event streams. Using an `http.Request`
+we instantiate a Connection. Then we subscribe to incoming events using callback functions, and then
+we establish the connection by calling the Connection's Connect method.
+*/
+package sse
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+ "net/http"
+ "sync"
+)
+
+// The Subscription struct is used to subscribe to a given provider.
+type Subscription struct {
+ // The client to which messages are sent. The implementation of the interface does not have to be
+ // thread-safe – providers will not call methods on it concurrently.
+ Client MessageWriter
+ // An optional last event ID indicating the event to resume the stream from.
+ // The events will replay starting from the first valid event sent after the one with the given ID.
+ // If the ID is invalid replaying events will be omitted and new events will be sent as normal.
+ LastEventID EventID
+ // The topics to receive message from. Must be a non-empty list.
+ // Topics are orthogonal to event types. They are used to filter what the server sends to each client.
+ Topics []string
+}
+
+// A Provider is a publish-subscribe system that can be used to implement a HTML5 server-sent events
+// protocol. A standard interface is required so HTTP request handlers are agnostic to the provider's implementation.
+//
+// Providers are required to be thread-safe.
+//
+// After Shutdown is called, trying to call any method of the provider must return ErrProviderClosed. The providers
+// may return other implementation-specific errors too, but the close error is guaranteed to be the same across
+// providers.
+type Provider interface {
+ // Subscribe to the provider. The context is used to remove the subscriber automatically
+ // when it is done. Errors returned by the subscription's callback function must be returned
+ // by Subscribe.
+ //
+ // Providers can assume that the topics list for a subscription has at least one topic.
+ Subscribe(ctx context.Context, subscription Subscription) error
+ // Publish a message to all the subscribers that are subscribed to the given topics.
+ // The topics slice must be non-empty, or ErrNoTopic will be raised.
+ Publish(message *Message, topics []string) error
+ // Shutdown stops the provider. Calling Shutdown will clean up all the provider's resources
+ // and make Subscribe and Publish fail with an error. All the listener channels will be
+ // closed and any ongoing publishes will be aborted.
+ //
+ // If the given context times out before the provider is shut down – shutting it down takes
+ // longer, the context error is returned.
+ //
+ // Calling Shutdown multiple times after it successfully returned the first time
+ // does nothing but return ErrProviderClosed.
+ Shutdown(ctx context.Context) error
+}
+
+// ErrProviderClosed is a sentinel error returned by providers when any operation is attempted after the provider is closed.
+// A closed provider might also be a result of an unexpected panic inside the provider.
+var ErrProviderClosed = errors.New("go-sse.server: provider is closed")
+
+// ErrNoTopic is a sentinel error returned when a Message is published without any topics.
+// It is not an issue to call Server.Publish without topics, because the Server will add the DefaultTopic;
+// it is an error to call Provider.Publish or Replayer.Put without any topics, though.
+var ErrNoTopic = errors.New("go-sse.server: no topics specified")
+
+// DefaultTopic is the identifier for the topic that is implied when no topics are specified for a Subscription
+// or a Message.
+const DefaultTopic = ""
+
+// A Server is mostly a convenience wrapper around a Provider.
+// It implements the http.Handler interface and has some methods
+// for calling the underlying provider's methods.
+//
+// When creating a server, if no provider is specified using the WithProvider
+// option, the Joe provider found in this package with no replay provider is used.
+type Server struct {
+ // The provider used to publish and subscribe clients to events.
+ // Defaults to Joe.
+ Provider Provider
+ // A callback that's called when an SSE session is started.
+ // You can use this to authorize the session, set the topics
+ // the client should be subscribed to and so on. Using the
+ // Res field of the Session you can write an error response
+ // to the client.
+ //
+ // The boolean returned indicates whether the given request
+ // should be accepted or not. If it is true, the Provider will receive
+ // a new subscription for the connection and events will be sent
+ // to this client, otherwise the request will be ended.
+ //
+ // Note that OnSession can write the HTTP response code itself, if something other
+ // than the implicit 200 OK is desired. This is especially helpful when refusing sessions –
+ // if OnSession does not write a response code, clients will receive a confusing 200 OK.
+ //
+ // If this is not set, the client will be subscribed to the provider
+ // using the DefaultTopic.
+ OnSession func(w http.ResponseWriter, r *http.Request) (topics []string, allowed bool)
+ // If the Logger function is set and returns a non-nil Logger instance,
+ // the Server will log various information about the request lifecycle.
+ Logger func(r *http.Request) *slog.Logger
+
+ provider Provider
+ initDone sync.Once
+}
+
+// ServeHTTP implements a default HTTP handler for a server.
+//
+// This handler upgrades the request, subscribes it to the server's provider and
+// starts sending incoming events to the client, while logging any errors.
+// It also sends the Last-Event-ID header's value, if present.
+//
+// If the request isn't upgradeable, it writes a message to the client along with
+// an 500 Internal Server ConnectionError response code. If on subscribe the provider returns
+// an error, it writes the error message to the client and a 500 Internal Server ConnectionError
+// response code.
+//
+// To customize behavior, use the OnSession callback or create your custom handler.
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ s.init()
+ // Make sure to keep the ServeHTTP implementation line number in sync with the number in the README!
+
+ var l *slog.Logger
+ if s.Logger != nil {
+ l = s.Logger(r)
+ }
+
+ if l != nil {
+ l.Info("sse: starting new session")
+ }
+
+ sess, err := Upgrade(w, r)
+ if err != nil {
+ if l != nil {
+ l.Error("sse: unsupported", "error", err)
+ }
+
+ http.Error(w, "Server-sent events unsupported", http.StatusInternalServerError)
+ return
+ }
+
+ sub, ok := s.getSubscription(sess)
+ if !ok {
+ if l != nil {
+ l.Warn("sse: invalid subscription")
+ }
+
+ return
+ }
+
+ if l != nil {
+ l.Info("sse: subscribing session", "topics", sub.Topics, "lastEventID", sub.LastEventID)
+ }
+
+ if err = s.provider.Subscribe(r.Context(), sub); err != nil {
+ if l != nil {
+ l.Error("sse: subscribe error", "error", err)
+ }
+
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if l != nil {
+ l.Info("sse: session ended")
+ }
+}
+
+// Publish sends the event to all subscribes that are subscribed to the topic the event is published to.
+// The topics are optional - if none are specified, the event is published to the DefaultTopic.
+func (s *Server) Publish(e *Message, topics ...string) error {
+ s.init()
+ return s.provider.Publish(e, getTopics(topics))
+}
+
+// Shutdown closes all the connections and stops the server. Publish operations will fail
+// with the error sent by the underlying provider. NewServer requests will be ignored.
+//
+// Call this method when shutting down the HTTP server using http.Server's RegisterOnShutdown
+// method. Not doing this will result in the server never shutting down or connections being
+// abruptly stopped.
+//
+// See the Provider.Shutdown documentation for information on context usage and errors.
+func (s *Server) Shutdown(ctx context.Context) error {
+ s.init()
+ return s.provider.Shutdown(ctx)
+}
+
+func (s *Server) init() {
+ s.initDone.Do(func() {
+ s.provider = s.Provider
+ if s.provider == nil {
+ s.provider = &Joe{}
+ }
+ })
+}
+
+func (s *Server) getSubscription(sess *Session) (Subscription, bool) {
+ sub := Subscription{Client: sess, LastEventID: sess.LastEventID, Topics: defaultTopicSlice}
+ if s.OnSession != nil {
+ topics, ok := s.OnSession(sess.Res, sess.Req)
+ if ok && len(topics) > 0 {
+ sub.Topics = topics
+ }
+
+ return sub, ok
+ }
+
+ return sub, true
+}
+
+var defaultTopicSlice = []string{DefaultTopic}
+
+func getTopics(initial []string) []string {
+ if len(initial) == 0 {
+ return defaultTopicSlice
+ }
+
+ return initial
+}
diff --git a/vendor/github.com/tmaxmax/go-sse/session.go b/vendor/github.com/tmaxmax/go-sse/session.go
new file mode 100644
index 0000000000..2dcc319184
--- /dev/null
+++ b/vendor/github.com/tmaxmax/go-sse/session.go
@@ -0,0 +1,160 @@
+package sse
+
+import (
+ "errors"
+ "net/http"
+)
+
+// ResponseWriter is a http.ResponseWriter augmented with a Flush method.
+type ResponseWriter interface {
+ http.ResponseWriter
+ Flush() error
+}
+
+// MessageWriter is a special kind of response writer used by providers to
+// send Messages to clients.
+type MessageWriter interface {
+ // Send sends the message to the client.
+ // To make sure it is sent, call Flush.
+ Send(m *Message) error
+ // Flush sends any buffered messages to the client.
+ Flush() error
+}
+
+// A Session is an HTTP request from an SSE client.
+// Create one using the Upgrade function.
+//
+// Using a Session you can also access the initial HTTP request,
+// get the last event ID, or write data to the client.
+type Session struct {
+ // The response writer for the request. Can be used to write an error response
+ // back to the client. Must not be used after the Session was subscribed!
+ Res ResponseWriter
+ // The initial HTTP request. Can be used to retrieve authentication data,
+ // topics, or data from context – a logger, for example.
+ Req *http.Request
+ // Last event ID of the client. It is unset if no ID was provided in the Last-Event-Id
+ // request header.
+ LastEventID EventID
+
+ didUpgrade bool
+}
+
+// Send sends the given event to the client. It returns any errors that occurred while writing the event.
+func (s *Session) Send(e *Message) error {
+ if err := s.doUpgrade(); err != nil {
+ return err
+ }
+ if _, err := e.WriteTo(s.Res); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Flush sends any buffered messages to the client.
+func (s *Session) Flush() error {
+ prevDidUpgrade := s.didUpgrade
+ if err := s.doUpgrade(); err != nil {
+ return err
+ }
+ if prevDidUpgrade == s.didUpgrade {
+ return s.Res.Flush()
+ }
+ return nil
+}
+
+func (s *Session) doUpgrade() error {
+ if !s.didUpgrade {
+ s.Res.Header()[headerContentType] = headerContentTypeValue
+ if err := s.Res.Flush(); err != nil {
+ return err
+ }
+ s.didUpgrade = true
+ }
+ return nil
+}
+
+// Upgrade upgrades an HTTP request to support server-sent events.
+// It returns a Session that's used to send events to the client, or an
+// error if the upgrade failed.
+//
+// The headers required by the SSE protocol are only sent when calling
+// the Send method for the first time. If other operations are done before
+// sending messages, other headers and status codes can safely be set.
+func Upgrade(w http.ResponseWriter, r *http.Request) (*Session, error) {
+ rw := getResponseWriter(w)
+ if rw == nil {
+ return nil, ErrUpgradeUnsupported
+ }
+
+ id := EventID{}
+ // Clients must not send empty Last-Event-Id headers:
+ // https://html.spec.whatwg.org/multipage/server-sent-events.html#sse-processing-model
+ if h := r.Header[headerLastEventID]; len(h) != 0 && h[0] != "" {
+ // We ignore the validity flag because if the given ID is invalid then an unset ID will be returned,
+ // which providers are required to ignore.
+ id, _ = NewID(h[0])
+ }
+
+ return &Session{Req: r, Res: rw, LastEventID: id}, nil
+}
+
+// ErrUpgradeUnsupported is returned when a request can't be upgraded to support server-sent events.
+var ErrUpgradeUnsupported = errors.New("go-sse.server: upgrade unsupported")
+
+// Canonicalized header keys.
+const (
+ headerLastEventID = "Last-Event-Id"
+ headerContentType = "Content-Type"
+)
+
+// Pre-allocated header value.
+var headerContentTypeValue = []string{"text/event-stream"}
+
+// Logic below is similar to Go 1.20's ResponseController.
+// We can't use that because we need to check if the request supports
+// flushing messages before we subscribe it to the event stream.
+
+type writeFlusher interface {
+ http.ResponseWriter
+ http.Flusher
+}
+
+type writeFlusherError interface {
+ http.ResponseWriter
+ FlushError() error
+}
+
+type rwUnwrapper interface {
+ Unwrap() http.ResponseWriter
+}
+
+func getResponseWriter(w http.ResponseWriter) ResponseWriter {
+ for {
+ switch v := w.(type) {
+ case writeFlusherError:
+ return flusherErrorWrapper{v}
+ case writeFlusher:
+ return flusherWrapper{v}
+ case rwUnwrapper:
+ w = v.Unwrap()
+ default:
+ return nil
+ }
+ }
+}
+
+type flusherWrapper struct {
+ writeFlusher
+}
+
+func (f flusherWrapper) Flush() error {
+ f.writeFlusher.Flush()
+ return nil
+}
+
+type flusherErrorWrapper struct {
+ writeFlusherError
+}
+
+func (f flusherErrorWrapper) Flush() error { return f.FlushError() }
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/.gitignore b/vendor/gopkg.in/cenkalti/backoff.v1/.gitignore
deleted file mode 100644
index 00268614f0..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/.gitignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/.travis.yml b/vendor/gopkg.in/cenkalti/backoff.v1/.travis.yml
deleted file mode 100644
index 1040404bfb..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-go:
- - 1.3.3
- - tip
-before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
-script:
- - $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/LICENSE b/vendor/gopkg.in/cenkalti/backoff.v1/LICENSE
deleted file mode 100644
index 89b8179965..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Cenk Altı
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/README.md b/vendor/gopkg.in/cenkalti/backoff.v1/README.md
deleted file mode 100644
index 13b347fb95..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
-
-This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
-
-[Exponential backoff][exponential backoff wiki]
-is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
-in order to gradually find an acceptable rate.
-The retries exponentially increase and stop increasing when a certain threshold is met.
-
-## Usage
-
-See https://godoc.org/github.com/cenkalti/backoff#pkg-examples
-
-## Contributing
-
-* I would like to keep this library as small as possible.
-* Please don't send a PR without opening an issue and discussing it first.
-* If proposed change is not a common use case, I will probably not accept it.
-
-[godoc]: https://godoc.org/github.com/cenkalti/backoff
-[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[travis]: https://travis-ci.org/cenkalti/backoff
-[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
-[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
-[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
-
-[google-http-java-client]: https://github.com/google/google-http-java-client
-[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
-
-[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/backoff.go b/vendor/gopkg.in/cenkalti/backoff.v1/backoff.go
deleted file mode 100644
index 2102c5f2de..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/backoff.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Package backoff implements backoff algorithms for retrying operations.
-//
-// Use Retry function for retrying operations that may fail.
-// If Retry does not meet your needs,
-// copy/paste the function into your project and modify as you wish.
-//
-// There is also Ticker type similar to time.Ticker.
-// You can use it if you need to work with channels.
-//
-// See Examples section below for usage examples.
-package backoff
-
-import "time"
-
-// BackOff is a backoff policy for retrying an operation.
-type BackOff interface {
- // NextBackOff returns the duration to wait before retrying the operation,
- // or backoff.Stop to indicate that no more retries should be made.
- //
- // Example usage:
- //
- // duration := backoff.NextBackOff();
- // if (duration == backoff.Stop) {
- // // Do not retry operation.
- // } else {
- // // Sleep for duration and retry operation.
- // }
- //
- NextBackOff() time.Duration
-
- // Reset to initial state.
- Reset()
-}
-
-// Stop indicates that no more retries should be made for use in NextBackOff().
-const Stop time.Duration = -1
-
-// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
-// meaning that the operation is retried immediately without waiting, indefinitely.
-type ZeroBackOff struct{}
-
-func (b *ZeroBackOff) Reset() {}
-
-func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
-
-// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
-// NextBackOff(), meaning that the operation should never be retried.
-type StopBackOff struct{}
-
-func (b *StopBackOff) Reset() {}
-
-func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
-
-// ConstantBackOff is a backoff policy that always returns the same backoff delay.
-// This is in contrast to an exponential backoff policy,
-// which returns a delay that grows longer as you call NextBackOff() over and over again.
-type ConstantBackOff struct {
- Interval time.Duration
-}
-
-func (b *ConstantBackOff) Reset() {}
-func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
-
-func NewConstantBackOff(d time.Duration) *ConstantBackOff {
- return &ConstantBackOff{Interval: d}
-}
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/context.go b/vendor/gopkg.in/cenkalti/backoff.v1/context.go
deleted file mode 100644
index 5d15709254..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/context.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package backoff
-
-import (
- "time"
-
- "golang.org/x/net/context"
-)
-
-// BackOffContext is a backoff policy that stops retrying after the context
-// is canceled.
-type BackOffContext interface {
- BackOff
- Context() context.Context
-}
-
-type backOffContext struct {
- BackOff
- ctx context.Context
-}
-
-// WithContext returns a BackOffContext with context ctx
-//
-// ctx must not be nil
-func WithContext(b BackOff, ctx context.Context) BackOffContext {
- if ctx == nil {
- panic("nil context")
- }
-
- if b, ok := b.(*backOffContext); ok {
- return &backOffContext{
- BackOff: b.BackOff,
- ctx: ctx,
- }
- }
-
- return &backOffContext{
- BackOff: b,
- ctx: ctx,
- }
-}
-
-func ensureContext(b BackOff) BackOffContext {
- if cb, ok := b.(BackOffContext); ok {
- return cb
- }
- return WithContext(b, context.Background())
-}
-
-func (b *backOffContext) Context() context.Context {
- return b.ctx
-}
-
-func (b *backOffContext) NextBackOff() time.Duration {
- select {
- case <-b.Context().Done():
- return Stop
- default:
- return b.BackOff.NextBackOff()
- }
-}
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/exponential.go b/vendor/gopkg.in/cenkalti/backoff.v1/exponential.go
deleted file mode 100644
index 9a6addf075..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/exponential.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package backoff
-
-import (
- "math/rand"
- "time"
-)
-
-/*
-ExponentialBackOff is a backoff implementation that increases the backoff
-period for each retry attempt using a randomization function that grows exponentially.
-
-NextBackOff() is calculated using the following formula:
-
- randomized interval =
- RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
-
-In other words NextBackOff() will range between the randomization factor
-percentage below and above the retry interval.
-
-For example, given the following parameters:
-
- RetryInterval = 2
- RandomizationFactor = 0.5
- Multiplier = 2
-
-the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
-multiplied by the exponential, that is, between 2 and 6 seconds.
-
-Note: MaxInterval caps the RetryInterval and not the randomized interval.
-
-If the time elapsed since an ExponentialBackOff instance is created goes past the
-MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
-
-The elapsed time can be reset by calling Reset().
-
-Example: Given the following default arguments, for 10 tries the sequence will be,
-and assuming we go over the MaxElapsedTime on the 10th try:
-
- Request # RetryInterval (seconds) Randomized Interval (seconds)
-
- 1 0.5 [0.25, 0.75]
- 2 0.75 [0.375, 1.125]
- 3 1.125 [0.562, 1.687]
- 4 1.687 [0.8435, 2.53]
- 5 2.53 [1.265, 3.795]
- 6 3.795 [1.897, 5.692]
- 7 5.692 [2.846, 8.538]
- 8 8.538 [4.269, 12.807]
- 9 12.807 [6.403, 19.210]
- 10 19.210 backoff.Stop
-
-Note: Implementation is not thread-safe.
-*/
-type ExponentialBackOff struct {
- InitialInterval time.Duration
- RandomizationFactor float64
- Multiplier float64
- MaxInterval time.Duration
- // After MaxElapsedTime the ExponentialBackOff stops.
- // It never stops if MaxElapsedTime == 0.
- MaxElapsedTime time.Duration
- Clock Clock
-
- currentInterval time.Duration
- startTime time.Time
- random *rand.Rand
-}
-
-// Clock is an interface that returns current time for BackOff.
-type Clock interface {
- Now() time.Time
-}
-
-// Default values for ExponentialBackOff.
-const (
- DefaultInitialInterval = 500 * time.Millisecond
- DefaultRandomizationFactor = 0.5
- DefaultMultiplier = 1.5
- DefaultMaxInterval = 60 * time.Second
- DefaultMaxElapsedTime = 15 * time.Minute
-)
-
-// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff() *ExponentialBackOff {
- b := &ExponentialBackOff{
- InitialInterval: DefaultInitialInterval,
- RandomizationFactor: DefaultRandomizationFactor,
- Multiplier: DefaultMultiplier,
- MaxInterval: DefaultMaxInterval,
- MaxElapsedTime: DefaultMaxElapsedTime,
- Clock: SystemClock,
- random: rand.New(rand.NewSource(time.Now().UnixNano())),
- }
- b.Reset()
- return b
-}
-
-type systemClock struct{}
-
-func (t systemClock) Now() time.Time {
- return time.Now()
-}
-
-// SystemClock implements Clock interface that uses time.Now().
-var SystemClock = systemClock{}
-
-// Reset the interval back to the initial retry interval and restarts the timer.
-func (b *ExponentialBackOff) Reset() {
- b.currentInterval = b.InitialInterval
- b.startTime = b.Clock.Now()
-}
-
-// NextBackOff calculates the next backoff interval using the formula:
-// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
-func (b *ExponentialBackOff) NextBackOff() time.Duration {
- // Make sure we have not gone over the maximum elapsed time.
- if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
- return Stop
- }
- defer b.incrementCurrentInterval()
- if b.random == nil {
- b.random = rand.New(rand.NewSource(time.Now().UnixNano()))
- }
- return getRandomValueFromInterval(b.RandomizationFactor, b.random.Float64(), b.currentInterval)
-}
-
-// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
-// is created and is reset when Reset() is called.
-//
-// The elapsed time is computed using time.Now().UnixNano().
-func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
- return b.Clock.Now().Sub(b.startTime)
-}
-
-// Increments the current interval by multiplying it with the multiplier.
-func (b *ExponentialBackOff) incrementCurrentInterval() {
- // Check for overflow, if overflow is detected set the current interval to the max interval.
- if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
- b.currentInterval = b.MaxInterval
- } else {
- b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
- }
-}
-
-// Returns a random value from the following interval:
-// [randomizationFactor * currentInterval, randomizationFactor * currentInterval].
-func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
- var delta = randomizationFactor * float64(currentInterval)
- var minInterval = float64(currentInterval) - delta
- var maxInterval = float64(currentInterval) + delta
-
- // Get a random value from the range [minInterval, maxInterval].
- // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
- // we want a 33% chance for selecting either 1, 2 or 3.
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
-}
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/retry.go b/vendor/gopkg.in/cenkalti/backoff.v1/retry.go
deleted file mode 100644
index 5dbd825b5c..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/retry.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package backoff
-
-import "time"
-
-// An Operation is executing by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-// Notify is a notify-on-error function. It receives an operation error and
-// backoff delay if the operation failed (with an error).
-//
-// NOTE that if the backoff policy stated to stop retrying,
-// the notify function isn't called.
-type Notify func(error, time.Duration)
-
-// Retry the operation o until it does not return error or BackOff stops.
-// o is guaranteed to be run at least once.
-// It is the caller's responsibility to reset b after Retry returns.
-//
-// If o returns a *PermanentError, the operation is not retried, and the
-// wrapped error is returned.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
-
-// RetryNotify calls notify function with the error and wait duration
-// for each failed attempt before sleep.
-func RetryNotify(operation Operation, b BackOff, notify Notify) error {
- var err error
- var next time.Duration
-
- cb := ensureContext(b)
-
- b.Reset()
- for {
- if err = operation(); err == nil {
- return nil
- }
-
- if permanent, ok := err.(*PermanentError); ok {
- return permanent.Err
- }
-
- if next = b.NextBackOff(); next == Stop {
- return err
- }
-
- if notify != nil {
- notify(err, next)
- }
-
- t := time.NewTimer(next)
-
- select {
- case <-cb.Context().Done():
- t.Stop()
- return err
- case <-t.C:
- }
- }
-}
-
-// PermanentError signals that the operation should not be retried.
-type PermanentError struct {
- Err error
-}
-
-func (e *PermanentError) Error() string {
- return e.Err.Error()
-}
-
-// Permanent wraps the given err in a *PermanentError.
-func Permanent(err error) *PermanentError {
- return &PermanentError{
- Err: err,
- }
-}
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/ticker.go b/vendor/gopkg.in/cenkalti/backoff.v1/ticker.go
deleted file mode 100644
index 49a99718d7..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/ticker.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package backoff
-
-import (
- "runtime"
- "sync"
- "time"
-)
-
-// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
-//
-// Ticks will continue to arrive when the previous operation is still running,
-// so operations that take a while to fail could run in quick succession.
-type Ticker struct {
- C <-chan time.Time
- c chan time.Time
- b BackOffContext
- stop chan struct{}
- stopOnce sync.Once
-}
-
-// NewTicker returns a new Ticker containing a channel that will send the time at times
-// specified by the BackOff argument. Ticker is guaranteed to tick at least once.
-// The channel is closed when Stop method is called or BackOff stops.
-func NewTicker(b BackOff) *Ticker {
- c := make(chan time.Time)
- t := &Ticker{
- C: c,
- c: c,
- b: ensureContext(b),
- stop: make(chan struct{}),
- }
- go t.run()
- runtime.SetFinalizer(t, (*Ticker).Stop)
- return t
-}
-
-// Stop turns off a ticker. After Stop, no more ticks will be sent.
-func (t *Ticker) Stop() {
- t.stopOnce.Do(func() { close(t.stop) })
-}
-
-func (t *Ticker) run() {
- c := t.c
- defer close(c)
- t.b.Reset()
-
- // Ticker is guaranteed to tick at least once.
- afterC := t.send(time.Now())
-
- for {
- if afterC == nil {
- return
- }
-
- select {
- case tick := <-afterC:
- afterC = t.send(tick)
- case <-t.stop:
- t.c = nil // Prevent future ticks from being sent to the channel.
- return
- case <-t.b.Context().Done():
- return
- }
- }
-}
-
-func (t *Ticker) send(tick time.Time) <-chan time.Time {
- select {
- case t.c <- tick:
- case <-t.stop:
- return nil
- }
-
- next := t.b.NextBackOff()
- if next == Stop {
- t.Stop()
- return nil
- }
-
- return time.After(next)
-}
diff --git a/vendor/gopkg.in/cenkalti/backoff.v1/tries.go b/vendor/gopkg.in/cenkalti/backoff.v1/tries.go
deleted file mode 100644
index d2da7308b6..0000000000
--- a/vendor/gopkg.in/cenkalti/backoff.v1/tries.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package backoff
-
-import "time"
-
-/*
-WithMaxTries creates a wrapper around another BackOff, which will
-return Stop if NextBackOff() has been called too many times since
-the last time Reset() was called
-
-Note: Implementation is not thread-safe.
-*/
-func WithMaxTries(b BackOff, max uint64) BackOff {
- return &backOffTries{delegate: b, maxTries: max}
-}
-
-type backOffTries struct {
- delegate BackOff
- maxTries uint64
- numTries uint64
-}
-
-func (b *backOffTries) NextBackOff() time.Duration {
- if b.maxTries > 0 {
- if b.maxTries <= b.numTries {
- return Stop
- }
- b.numTries++
- }
- return b.delegate.NextBackOff()
-}
-
-func (b *backOffTries) Reset() {
- b.numTries = 0
- b.delegate.Reset()
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b00172aa40..adaacba827 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1855,9 +1855,6 @@ github.com/prometheus/procfs/internal/util
github.com/prometheus/statsd_exporter/pkg/level
github.com/prometheus/statsd_exporter/pkg/mapper
github.com/prometheus/statsd_exporter/pkg/mapper/fsm
-# github.com/r3labs/sse/v2 v2.10.0
-## explicit; go 1.13
-github.com/r3labs/sse/v2
# github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
## explicit
github.com/rcrowley/go-metrics
@@ -2115,6 +2112,10 @@ github.com/tklauser/go-sysconf
# github.com/tklauser/numcpus v0.11.0
## explicit; go 1.24.0
github.com/tklauser/numcpus
+# github.com/tmaxmax/go-sse v0.11.0
+## explicit; go 1.22
+github.com/tmaxmax/go-sse
+github.com/tmaxmax/go-sse/internal/parser
# github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208
## explicit
github.com/toorop/go-dkim
@@ -2705,9 +2706,6 @@ google.golang.org/protobuf/types/known/fieldmaskpb
google.golang.org/protobuf/types/known/structpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
-# gopkg.in/cenkalti/backoff.v1 v1.1.0
-## explicit
-gopkg.in/cenkalti/backoff.v1
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
## explicit
gopkg.in/tomb.v1