diff --git a/editor.go b/editor.go
index d7afef5..cda4f4b 100644
--- a/editor.go
+++ b/editor.go
@@ -26,7 +26,7 @@ func (a *goBlog) serveEditor(w http.ResponseWriter, r *http.Request) {
 
 func (a *goBlog) serveEditorPreview(w http.ResponseWriter, r *http.Request) {
 	blog, _ := a.getBlog(r)
-	c, err := ws.Accept(w, r, nil)
+	c, err := ws.Accept(w, r, &ws.AcceptOptions{CompressionMode: ws.CompressionContextTakeover})
 	if err != nil {
 		return
 	}
diff --git a/go.mod b/go.mod
index 78f5177..e3c57ed 100644
--- a/go.mod
+++ b/go.mod
@@ -32,7 +32,7 @@ require (
 	github.com/jlelse/feeds v1.2.1-0.20210704161900-189f94254ad4
 	github.com/justinas/alice v1.2.0
 	github.com/kaorimatz/go-opml v0.0.0-20210201121027-bc8e2852d7f9
-	github.com/klauspost/compress v1.14.2
+	github.com/klauspost/compress v1.14.3
 	github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible
 	github.com/lopezator/migrator v0.3.0
 	github.com/mattn/go-sqlite3 v1.14.11
diff --git a/go.sum b/go.sum
index aae48a2..639bba9 100644
--- a/go.sum
+++ b/go.sum
@@ -275,8 +275,8 @@ github.com/kaorimatz/go-opml v0.0.0-20210201121027-bc8e2852d7f9 h1:+9REu9CK9D1AQ
 github.com/kaorimatz/go-opml v0.0.0-20210201121027-bc8e2852d7f9/go.mod h1:OvY5ZBrAC9kOvM2PZs9Lw0BH+5K7tjrT6T7SFhn27OA=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
-github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.3 h1:DQv1WP+iS4srNjibdnHtqu8JNWCDMluj5NzPnFJsnvk=
+github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
diff --git a/http.go b/http.go
index 0bff2ed..3d4fba0 100644
--- a/http.go
+++ b/http.go
@@ -4,7 +4,6 @@ import (
 	"database/sql"
 	"errors"
 	"fmt"
-	"io"
 	"log"
 	"net"
 	"net/http"
@@ -16,8 +15,7 @@ import (
 	"github.com/go-chi/chi/v5/middleware"
 	"github.com/justinas/alice"
 	"github.com/klauspost/compress/flate"
-	"github.com/klauspost/compress/gzip"
-	"go.goblog.app/app/pkgs/contenttype"
+	"go.goblog.app/app/pkgs/httpcompress"
 	"go.goblog.app/app/pkgs/maprouter"
 	"golang.org/x/net/context"
 )
@@ -43,31 +41,7 @@ func (a *goBlog) startServer() (err error) {
 	if a.cfg.Server.Logging {
 		h = h.Append(a.logMiddleware)
 	}
-	compressor := middleware.NewCompressor(flate.BestCompression, []string{
-		contenttype.AS,
-		contenttype.ATOM,
-		contenttype.CSS,
-		contenttype.HTML,
-		contenttype.JS,
-		contenttype.JSON,
-		contenttype.JSONFeed,
-		contenttype.LDJSON,
-		contenttype.RSS,
-		contenttype.Text,
-		contenttype.XML,
-		"application/opensearchdescription+xml",
-		"application/jrd+json",
-		"application/xrd+xml",
-	}...)
-	compressor.SetEncoder("deflate", func(w io.Writer, level int) io.Writer {
-		cw, _ := flate.NewWriter(w, level)
-		return cw
-	})
-	compressor.SetEncoder("gzip", func(w io.Writer, level int) io.Writer {
-		cw, _ := gzip.NewWriterLevel(w, level)
-		return cw
-	})
-	h = h.Append(middleware.Recoverer, compressor.Handler, middleware.Heartbeat("/ping"))
+	h = h.Append(middleware.Recoverer, httpcompress.Compress(flate.BestCompression), middleware.Heartbeat("/ping"))
 	if a.httpsConfigured(false) {
 		h = h.Append(a.securityHeaders)
 	}
diff --git a/httpClient_test.go b/httpClient_test.go
index 1fd424e..7b71d9e 100644
--- a/httpClient_test.go
+++ b/httpClient_test.go
@@ -8,11 +8,11 @@ import (
 )
 
 type fakeHttpClient struct {
-	*http.Client
-	req     *http.Request
-	res     *http.Response
-	handler http.Handler
 	mu      sync.Mutex
+	handler http.Handler
+	*http.Client
+	req *http.Request
+	res *http.Response
 }
 
 func newFakeHttpClient() *fakeHttpClient {
diff --git a/markdown.go b/markdown.go
index 1003926..d40df98 100644
--- a/markdown.go
+++ b/markdown.go
@@ -122,8 +122,8 @@ func (a *goBlog) renderMdTitle(s string) string {
 // Links
 
 type customExtension struct {
-	absoluteLinks bool
 	publicAddress string
+	absoluteLinks bool
 }
 
 func (l *customExtension) Extend(m goldmark.Markdown) {
@@ -136,8 +136,8 @@ func (l *customExtension) Extend(m goldmark.Markdown) {
 }
 
 type customRenderer struct {
-	absoluteLinks bool
 	publicAddress string
+	absoluteLinks bool
 }
 
 func (c *customRenderer) RegisterFuncs(r renderer.NodeRendererFuncRegisterer) {
diff --git a/pkgs/httpcompress/httpCompress.go b/pkgs/httpcompress/httpCompress.go
new file mode 100644
index 0000000..87f3195
--- /dev/null
+++ b/pkgs/httpcompress/httpCompress.go
@@ -0,0 +1,293 @@
+package httpcompress
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/klauspost/compress/flate"
+	"github.com/klauspost/compress/gzip"
+	"github.com/thoas/go-funk"
+
+	"go.goblog.app/app/pkgs/contenttype"
+)
+
+var defaultCompressibleContentTypes = []string{
+	contenttype.AS,
+	contenttype.ATOM,
+	contenttype.CSS,
+	contenttype.HTML,
+	contenttype.JS,
+	contenttype.JSON,
+	contenttype.JSONFeed,
+	contenttype.LDJSON,
+	contenttype.RSS,
+	contenttype.Text,
+	contenttype.XML,
+	"application/opensearchdescription+xml",
+	"application/jrd+json",
+	"application/xrd+xml",
+}
+
+// Compress is a middleware that compresses the response
+// body for a given set of content types to a data format
+// negotiated via the Accept-Encoding request header. It
+// uses the given compression level.
+//
+// Passing a compression level of 5 is a sensible value.
+func Compress(level int, types ...string) func(next http.Handler) http.Handler {
+	return NewCompressor(level, types...).Handler
+}
+
+// Compressor represents a set of encoding configurations.
+type Compressor struct {
+	// The mapping of pooled encoders to pools.
+	pooledEncoders map[string]*sync.Pool
+	// The set of content types allowed to be compressed.
+	allowedTypes map[string]interface{}
+	// The list of encoders in order of decreasing precedence.
+	encodingPrecedence []string
+	// The compression level.
+	level int
+}
+
+// NewCompressor creates a new Compressor that will handle encoding responses.
+//
+// The level should be one of the ones defined in the flate package.
+// The types are the content types that are allowed to be compressed.
+func NewCompressor(level int, types ...string) *Compressor {
+	// If types are provided, set those as the allowed types. If none are
+	// provided, use the default list.
+	allowedTypes := map[string]interface{}{}
+	for _, t := range funk.ShortIf(len(types) > 0, types, defaultCompressibleContentTypes).([]string) {
+		allowedTypes[t] = nil
+	}
+
+	c := &Compressor{
+		level:          level,
+		pooledEncoders: map[string]*sync.Pool{},
+		allowedTypes:   allowedTypes,
+	}
+
+	c.SetEncoder("deflate", encoderDeflate)
+	c.SetEncoder("gzip", encoderGzip)
+
+	return c
+}
+
+// SetEncoder can be used to set the implementation of a compression algorithm.
+//
+// The encoding should be a standardised identifier. See:
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
+func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
+	encoding = strings.ToLower(encoding)
+	if encoding == "" {
+		panic("the encoding can not be empty")
+	}
+	if fn == nil {
+		panic("attempted to set a nil encoder function")
+	}
+
+	// Delete an already registered encoder
+	delete(c.pooledEncoders, encoding)
+
+	c.pooledEncoders[encoding] = &sync.Pool{
+		New: func() interface{} {
+			return fn(ioutil.Discard, c.level)
+		},
+	}
+
+	for i, v := range c.encodingPrecedence {
+		if v == encoding {
+			c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
+			break
+		}
+	}
+
+	c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
+}
+
+// Handler returns a new middleware that will compress the response based on the
+// current Compressor.
+func (c *Compressor) Handler(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		cw := &compressResponseWriter{
+			compressor:     c,
+			ResponseWriter: w,
+			request:        r,
+		}
+		next.ServeHTTP(cw, r)
+		cw.Close()
+	})
+}
+
+func matchAcceptEncoding(accepted []string, encoding string) bool {
+	return funk.ContainsString(accepted, encoding)
+}
+
+// An EncoderFunc is a function that wraps the provided io.Writer with a
+// streaming compression algorithm and returns it.
+//
+// In case of failure, the function should return nil.
+type EncoderFunc func(w io.Writer, level int) compressWriter
+
+// Interface for types that allow resetting io.Writers.
+type compressWriter interface {
+	io.Writer
+	Reset(w io.Writer)
+	Flush() error
+}
+
+type compressResponseWriter struct {
+	http.ResponseWriter                // The response writer to delegate to.
+	encoder             compressWriter // The encoder to use (if any).
+	cleanup             func()         // Cleanup function to reset and repool encoder.
+	compressor          *Compressor    // Holds the compressor configuration.
+	request             *http.Request  // The request that is being handled.
+	wroteHeader         bool           // Whether the header has been written.
+	closed              bool           // Whether the connection has been closed.
+}
+
+func (cw *compressResponseWriter) isCompressable() bool {
+	// Parse the first part of the Content-Type response header.
+	contentType := cw.Header().Get("Content-Type")
+	if idx := strings.Index(contentType, ";"); idx >= 0 {
+		contentType = contentType[0:idx]
+	}
+
+	// Is the content type compressable?
+	_, ok := cw.compressor.allowedTypes[contentType]
+	return ok
+}
+
+func (cw *compressResponseWriter) writer() io.Writer {
+	if cw.encoder != nil {
+		return cw.encoder
+	}
+	return cw.ResponseWriter
+}
+
+// selectEncoder returns the encoder, the name of the encoder, and a closer function.
+func (cw *compressResponseWriter) selectEncoder() (compressWriter, string, func()) {
+	// Parse the names of all accepted algorithms from the header.
+	accepted := strings.Split(strings.ToLower(cw.request.Header.Get("Accept-Encoding")), ",")
+
+	// Find a supported encoder in the accepted list, by precedence
+	for _, name := range cw.compressor.encodingPrecedence {
+		if matchAcceptEncoding(accepted, name) {
+			if pool, ok := cw.compressor.pooledEncoders[name]; ok {
+				encoder := pool.Get().(compressWriter)
+				cleanup := func() {
+					encoder.Reset(nil)
+					pool.Put(encoder)
+				}
+				encoder.Reset(cw.ResponseWriter)
+				return encoder, name, cleanup
+			}
+		}
+	}
+
+	// No encoder found to match the accepted encoding
+	return nil, "", nil
+}
+
+func (cw *compressResponseWriter) doCleanup() {
+	if cw.encoder != nil {
+		cw.encoder = nil
+		cw.cleanup()
+		cw.cleanup = nil
+	}
+}
+
+func (cw *compressResponseWriter) WriteHeader(code int) {
+	defer cw.ResponseWriter.WriteHeader(code)
+
+	if cw.wroteHeader {
+		return
+	}
+
+	cw.wroteHeader = true
+
+	if cw.Header().Get("Content-Encoding") != "" {
+		// Data has already been compressed.
+		return
+	}
+
+	if !cw.isCompressable() {
+		// Data is not compressable.
+		return
+	}
+
+	var encoding string
+	cw.encoder, encoding, cw.cleanup = cw.selectEncoder()
+	if encoding != "" {
+		cw.Header().Set("Content-Encoding", encoding)
+		cw.Header().Add("Vary", "Accept-Encoding")
+
+		// The content-length after compression is unknown
+		cw.Header().Del("Content-Length")
+	}
+}
+
+func (cw *compressResponseWriter) Write(p []byte) (int, error) {
+	if !cw.wroteHeader {
+		cw.WriteHeader(http.StatusOK)
+	}
+	return cw.writer().Write(p)
+}
+
+func (cw *compressResponseWriter) Flush() {
+	if cw.encoder != nil {
+		cw.encoder.Flush()
+	}
+	if f, ok := cw.ResponseWriter.(http.Flusher); ok {
+		f.Flush()
+	}
+}
+
+func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	if hj, ok := cw.writer().(http.Hijacker); ok {
+		return hj.Hijack()
+	}
+	return nil, nil, errors.New("http.Hijacker is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
+	if ps, ok := cw.writer().(http.Pusher); ok {
+		return ps.Push(target, opts)
+	}
+	return errors.New("http.Pusher is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Close() error {
+	if cw.closed {
+		return nil
+	}
+	cw.closed = true
+	defer cw.doCleanup()
+	if c, ok := cw.writer().(io.WriteCloser); ok {
+		return c.Close()
+	}
+	return errors.New("io.WriteCloser is unavailable on the writer")
+}
+
+func encoderGzip(w io.Writer, level int) compressWriter {
+	gw, err := gzip.NewWriterLevel(w, level)
+	if err != nil {
+		return nil
+	}
+	return gw
+}
+
+func encoderDeflate(w io.Writer, level int) compressWriter {
+	dw, err := flate.NewWriter(w, level)
+	if err != nil {
+		return nil
+	}
+	return dw
+}
diff --git a/privateMode.go b/privateMode.go
index f1beb99..7fba0aa 100644
--- a/privateMode.go
+++ b/privateMode.go
@@ -11,11 +11,12 @@ func (a *goBlog) isPrivate() bool {
 }
 
 func (a *goBlog) privateModeHandler(next http.Handler) http.Handler {
+	private := alice.New(a.authMiddleware).Then(next)
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if a.isPrivate() {
-			alice.New(a.authMiddleware).Then(next).ServeHTTP(w, r)
-		} else {
-			next.ServeHTTP(w, r)
+			private.ServeHTTP(w, r)
+			return
 		}
+		next.ServeHTTP(w, r)
 	})
 }
diff --git a/queue.go b/queue.go
index afd2e18..493c4ec 100644
--- a/queue.go
+++ b/queue.go
@@ -9,10 +9,10 @@ import (
 )
 
 type queueItem struct {
-	id       int
+	schedule time.Time
 	name     string
 	content  []byte
-	schedule *time.Time
+	id       int
 }
 
 func (db *database) enqueue(name string, content []byte, schedule time.Time) error {
@@ -64,6 +64,6 @@ func (db *database) peekQueue(name string) (*queueItem, error) {
 	if err != nil {
 		return nil, err
 	}
-	qi.schedule = &t
+	qi.schedule = t
 	return qi, nil
 }
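For reference, a minimal usage sketch of the new httpcompress package (not part of the diff above): it shows how Compress is meant to wrap an ordinary http.Handler, mirroring how the changed http.go plugs it into the middleware chain. The ServeMux handler and the listen address are illustrative assumptions, not code from the repository.

package main

import (
	"net/http"

	"github.com/klauspost/compress/flate"

	"go.goblog.app/app/pkgs/httpcompress"
)

func main() {
	// Illustrative handler; any http.Handler works here.
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Responses are only compressed when their Content-Type (before any
		// parameters) is in the compressor's allowed list.
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		_, _ = w.Write([]byte("<h1>Hello</h1>"))
	})

	// Compress returns a middleware that encodes matching responses with
	// gzip or deflate, depending on the Accept-Encoding request header.
	compressed := httpcompress.Compress(flate.BestCompression)(mux)

	// The listen address is an assumption for this sketch.
	_ = http.ListenAndServe(":8080", compressed)
}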