Mirror of https://codeberg.org/Codeberg/pages-server.git (synced 2024-11-30 21:25:26 +00:00)

Commit bdc2d0c259: dont access global vars inject them
Parent commit: fb5726bd20
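In short: the request handler, the certificate code and the envOr helper move out of package main into the server package (imported below as pages_server), the 404-page embed moves into a new html package, and settings such as MainDomainSuffix, GiteaRoot and the Gitea API token are passed to the code as parameters instead of being read from package-level globals.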
go.mod (2 lines changed)
@@ -7,7 +7,7 @@ require (
 	github.com/akrylysov/pogreb v0.10.1
 	github.com/go-acme/lego/v4 v4.5.3
 	github.com/reugn/equalizer v0.0.0-20210216135016-a959c509d7ad
-	github.com/rs/zerolog v1.26.0 // indirect
+	github.com/rs/zerolog v1.26.0
 	github.com/valyala/fasthttp v1.31.0
 	github.com/valyala/fastjson v1.6.3
 )
go.sum (4 lines changed)
@@ -596,8 +596,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
 golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -661,8 +661,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
handler.go (file deleted; 544 lines removed)
@@ -1,544 +0,0 @@
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/OrlovEvgeny/go-mcache"
	"github.com/rs/zerolog/log"
	"github.com/valyala/fasthttp"
	"github.com/valyala/fastjson"
)

// handler handles a single HTTP request to the web server.
func handler(ctx *fasthttp.RequestCtx) {
	log := log.With().Str("handler", string(ctx.Request.Header.RequestURI())).Logger()

	ctx.Response.Header.Set("Server", "Codeberg Pages")

	// Force new default from specification (since November 2020) - see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#strict-origin-when-cross-origin
	ctx.Response.Header.Set("Referrer-Policy", "strict-origin-when-cross-origin")

	// Enable browser caching for up to 10 minutes
	ctx.Response.Header.Set("Cache-Control", "public, max-age=600")

	trimmedHost := TrimHostPort(ctx.Request.Host())

	// Add HSTS for RawDomain and MainDomainSuffix
	if hsts := GetHSTSHeader(trimmedHost); hsts != "" {
		ctx.Response.Header.Set("Strict-Transport-Security", hsts)
	}

	// Block all methods not required for static pages
	if !ctx.IsGet() && !ctx.IsHead() && !ctx.IsOptions() {
		ctx.Response.Header.Set("Allow", "GET, HEAD, OPTIONS")
		ctx.Error("Method not allowed", fasthttp.StatusMethodNotAllowed)
		return
	}

	// Block blacklisted paths (like ACME challenges)
	for _, blacklistedPath := range BlacklistedPaths {
		if bytes.HasPrefix(ctx.Path(), blacklistedPath) {
			returnErrorPage(ctx, fasthttp.StatusForbidden)
			return
		}
	}

	// Allow CORS for specified domains
	if ctx.IsOptions() {
		allowCors := false
		for _, allowedCorsDomain := range AllowedCorsDomains {
			if bytes.Equal(trimmedHost, allowedCorsDomain) {
				allowCors = true
				break
			}
		}
		if allowCors {
			ctx.Response.Header.Set("Access-Control-Allow-Origin", "*")
			ctx.Response.Header.Set("Access-Control-Allow-Methods", "GET, HEAD")
		}
		ctx.Response.Header.Set("Allow", "GET, HEAD, OPTIONS")
		ctx.Response.Header.SetStatusCode(fasthttp.StatusNoContent)
		return
	}

	// Prepare request information to Gitea
	var targetOwner, targetRepo, targetBranch, targetPath string
	var targetOptions = &upstreamOptions{
		ForbiddenMimeTypes: map[string]struct{}{},
		TryIndexPages:      true,
	}

	// tryBranch checks if a branch exists and populates the target variables. If canonicalLink is non-empty, it will
	// also disallow search indexing and add a Link header to the canonical URL.
	var tryBranch = func(repo string, branch string, path []string, canonicalLink string) bool {
		if repo == "" {
			return false
		}

		// Check if the branch exists, otherwise treat it as a file path
		branchTimestampResult := getBranchTimestamp(targetOwner, repo, branch)
		if branchTimestampResult == nil {
			// branch doesn't exist
			return false
		}

		// Branch exists, use it
		targetRepo = repo
		targetPath = strings.Trim(strings.Join(path, "/"), "/")
		targetBranch = branchTimestampResult.branch

		targetOptions.BranchTimestamp = branchTimestampResult.timestamp

		if canonicalLink != "" {
			// Hide from search machines & add canonical link
			ctx.Response.Header.Set("X-Robots-Tag", "noarchive, noindex")
			ctx.Response.Header.Set("Link",
				strings.NewReplacer("%b", targetBranch, "%p", targetPath).Replace(canonicalLink)+
					"; rel=\"canonical\"",
			)
		}

		return true
	}

	// tryUpstream forwards the target request to the Gitea API, and shows an error page on failure.
	var tryUpstream = func() {
		// check if a canonical domain exists on a request on MainDomain
		if bytes.HasSuffix(trimmedHost, MainDomainSuffix) {
			canonicalDomain, _ := checkCanonicalDomain(targetOwner, targetRepo, targetBranch, "")
			if !strings.HasSuffix(strings.SplitN(canonicalDomain, "/", 2)[0], string(MainDomainSuffix)) {
				canonicalPath := string(ctx.RequestURI())
				if targetRepo != "pages" {
					canonicalPath = "/" + strings.SplitN(canonicalPath, "/", 3)[2]
				}
				ctx.Redirect("https://"+canonicalDomain+canonicalPath, fasthttp.StatusTemporaryRedirect)
				return
			}
		}

		// Try to request the file from the Gitea API
		if !upstream(ctx, targetOwner, targetRepo, targetBranch, targetPath, targetOptions) {
			returnErrorPage(ctx, ctx.Response.StatusCode())
		}
	}

	log.Debug().Msg("preparations")

	if RawDomain != nil && bytes.Equal(trimmedHost, RawDomain) {
		// Serve raw content from RawDomain
		log.Debug().Msg("raw domain")

		targetOptions.TryIndexPages = false
		targetOptions.ForbiddenMimeTypes["text/html"] = struct{}{}
		targetOptions.DefaultMimeType = "text/plain; charset=utf-8"

		pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
		if len(pathElements) < 2 {
			// https://{RawDomain}/{owner}/{repo}[/@{branch}]/{path} is required
			ctx.Redirect(RawInfoPage, fasthttp.StatusTemporaryRedirect)
			return
		}
		targetOwner = pathElements[0]
		targetRepo = pathElements[1]

		// raw.codeberg.org/example/myrepo/@main/index.html
		if len(pathElements) > 2 && strings.HasPrefix(pathElements[2], "@") {
			log.Debug().Msg("raw domain preparations, now trying with specified branch")
			if tryBranch(targetRepo, pathElements[2][1:], pathElements[3:],
				string(GiteaRoot)+"/"+targetOwner+"/"+targetRepo+"/src/branch/%b/%p",
			) {
				log.Debug().Msg("tryBranch, now trying upstream")
				tryUpstream()
				return
			}
			log.Debug().Msg("missing branch")
			returnErrorPage(ctx, fasthttp.StatusFailedDependency)
			return
		} else {
			log.Debug().Msg("raw domain preparations, now trying with default branch")
			tryBranch(targetRepo, "", pathElements[2:],
				string(GiteaRoot)+"/"+targetOwner+"/"+targetRepo+"/src/branch/%b/%p",
			)
			log.Debug().Msg("tryBranch, now trying upstream")
			tryUpstream()
			return
		}

	} else if bytes.HasSuffix(trimmedHost, MainDomainSuffix) {
		// Serve pages from subdomains of MainDomainSuffix
		log.Debug().Msg("main domain suffix")

		pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
		targetOwner = string(bytes.TrimSuffix(trimmedHost, MainDomainSuffix))
		targetRepo = pathElements[0]
		targetPath = strings.Trim(strings.Join(pathElements[1:], "/"), "/")

		if targetOwner == "www" {
			// www.codeberg.page redirects to codeberg.page
			ctx.Redirect("https://"+string(MainDomainSuffix[1:])+string(ctx.Path()), fasthttp.StatusPermanentRedirect)
			return
		}

		// Check if the first directory is a repo with the second directory as a branch
		// example.codeberg.page/myrepo/@main/index.html
		if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
			if targetRepo == "pages" {
				// example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
				ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), fasthttp.StatusTemporaryRedirect)
				return
			}

			log.Debug().Msg("main domain preparations, now trying with specified repo & branch")
			if tryBranch(pathElements[0], pathElements[1][1:], pathElements[2:],
				"/"+pathElements[0]+"/%p",
			) {
				log.Debug().Msg("tryBranch, now trying upstream")
				tryUpstream()
			} else {
				returnErrorPage(ctx, fasthttp.StatusFailedDependency)
			}
			return
		}

		// Check if the first directory is a branch for the "pages" repo
		// example.codeberg.page/@main/index.html
		if strings.HasPrefix(pathElements[0], "@") {
			log.Debug().Msg("main domain preparations, now trying with specified branch")
			if tryBranch("pages", pathElements[0][1:], pathElements[1:], "/%p") {
				log.Debug().Msg("tryBranch, now trying upstream")
				tryUpstream()
			} else {
				returnErrorPage(ctx, fasthttp.StatusFailedDependency)
			}
			return
		}

		// Check if the first directory is a repo with a "pages" branch
		// example.codeberg.page/myrepo/index.html
		// example.codeberg.page/pages/... is not allowed here.
		log.Debug().Msg("main domain preparations, now trying with specified repo")
		if pathElements[0] != "pages" && tryBranch(pathElements[0], "pages", pathElements[1:], "") {
			log.Debug().Msg("tryBranch, now trying upstream")
			tryUpstream()
			return
		}

		// Try to use the "pages" repo on its default branch
		// example.codeberg.page/index.html
		log.Debug().Msg("main domain preparations, now trying with default repo/branch")
		if tryBranch("pages", "", pathElements, "") {
			log.Debug().Msg("tryBranch, now trying upstream")
			tryUpstream()
			return
		}

		// Couldn't find a valid repo/branch
		returnErrorPage(ctx, fasthttp.StatusFailedDependency)
		return
	} else {
		trimmedHostStr := string(trimmedHost)

		// Serve pages from external domains
		targetOwner, targetRepo, targetBranch = getTargetFromDNS(trimmedHostStr)
		if targetOwner == "" {
			returnErrorPage(ctx, fasthttp.StatusFailedDependency)
			return
		}

		pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
		canonicalLink := ""
		if strings.HasPrefix(pathElements[0], "@") {
			targetBranch = pathElements[0][1:]
			pathElements = pathElements[1:]
			canonicalLink = "/%p"
		}

		// Try to use the given repo on the given branch or the default branch
		log.Debug().Msg("custom domain preparations, now trying with details from DNS")
		if tryBranch(targetRepo, targetBranch, pathElements, canonicalLink) {
			canonicalDomain, valid := checkCanonicalDomain(targetOwner, targetRepo, targetBranch, trimmedHostStr)
			if !valid {
				returnErrorPage(ctx, fasthttp.StatusMisdirectedRequest)
				return
			} else if canonicalDomain != trimmedHostStr {
				// only redirect if the target is also a codeberg page!
				targetOwner, _, _ = getTargetFromDNS(strings.SplitN(canonicalDomain, "/", 2)[0])
				if targetOwner != "" {
					ctx.Redirect("https://"+canonicalDomain+string(ctx.RequestURI()), fasthttp.StatusTemporaryRedirect)
					return
				} else {
					returnErrorPage(ctx, fasthttp.StatusFailedDependency)
					return
				}
			}

			log.Debug().Msg("tryBranch, now trying upstream")
			tryUpstream()
			return
		} else {
			returnErrorPage(ctx, fasthttp.StatusFailedDependency)
			return
		}
	}
}

// returnErrorPage sets the response status code and writes NotFoundPage to the response body, with "%status" replaced
// with the provided status code.
func returnErrorPage(ctx *fasthttp.RequestCtx, code int) {
	ctx.Response.SetStatusCode(code)
	ctx.Response.Header.SetContentType("text/html; charset=utf-8")
	message := fasthttp.StatusMessage(code)
	if code == fasthttp.StatusMisdirectedRequest {
		message += " - domain not specified in <code>.domains</code> file"
	}
	if code == fasthttp.StatusFailedDependency {
		message += " - target repo/branch doesn't exist or is private"
	}
	// TODO: use template engine?
	ctx.Response.SetBody(bytes.ReplaceAll(NotFoundPage, []byte("%status"), []byte(strconv.Itoa(code)+" "+message)))
}

// DefaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
var DefaultBranchCacheTimeout = 15 * time.Minute

// BranchExistanceCacheTimeout specifies the timeout for the branch timestamp & existance cache. It should be shorter
// than FileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
// picked up faster, while still allowing the content to be cached longer if nothing changes.
var BranchExistanceCacheTimeout = 5 * time.Minute

// branchTimestampCache stores branch timestamps for faster cache checking
var branchTimestampCache = mcache.New()

type branchTimestamp struct {
	branch    string
	timestamp time.Time
}

// FileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
// on your available memory.
var FileCacheTimeout = 5 * time.Minute

// FileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
var FileCacheSizeLimit = 1024 * 1024

// fileResponseCache stores responses from the Gitea server
// TODO: make this an MRU cache with a size limit
var fileResponseCache = mcache.New()

type fileResponse struct {
	exists   bool
	mimeType string
	body     []byte
}

// getBranchTimestamp finds the default branch (if branch is "") and returns the last modification time of the branch
// (or nil if the branch doesn't exist)
func getBranchTimestamp(owner, repo, branch string) *branchTimestamp {
	if result, ok := branchTimestampCache.Get(owner + "/" + repo + "/" + branch); ok {
		if result == nil {
			return nil
		}
		return result.(*branchTimestamp)
	}
	result := &branchTimestamp{}
	result.branch = branch
	if branch == "" {
		// Get default branch
		var body = make([]byte, 0)
		// TODO: use header for API key?
		status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo+"?access_token="+GiteaApiToken, 5*time.Second)
		if err != nil || status != 200 {
			_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, nil, DefaultBranchCacheTimeout)
			return nil
		}
		result.branch = fastjson.GetString(body, "default_branch")
	}

	var body = make([]byte, 0)
	status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo+"/branches/"+branch+"?access_token="+GiteaApiToken, 5*time.Second)
	if err != nil || status != 200 {
		return nil
	}

	result.timestamp, _ = time.Parse(time.RFC3339, fastjson.GetString(body, "commit", "timestamp"))
	_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, result, BranchExistanceCacheTimeout)
	return result
}

var upstreamClient = fasthttp.Client{
	ReadTimeout:        10 * time.Second,
	MaxConnDuration:    60 * time.Second,
	MaxConnWaitTimeout: 1000 * time.Millisecond,
	MaxConnsPerHost:    128 * 16, // TODO: adjust bottlenecks for best performance with Gitea!
}

// upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context.
func upstream(ctx *fasthttp.RequestCtx, targetOwner, targetRepo, targetBranch, targetPath string, options *upstreamOptions) (final bool) {
	log := log.With().Strs("upstream", []string{targetOwner, targetRepo, targetBranch, targetPath}).Logger()

	if options.ForbiddenMimeTypes == nil {
		options.ForbiddenMimeTypes = map[string]struct{}{}
	}

	// Check if the branch exists and when it was modified
	if options.BranchTimestamp == (time.Time{}) {
		branch := getBranchTimestamp(targetOwner, targetRepo, targetBranch)

		if branch == nil {
			returnErrorPage(ctx, fasthttp.StatusFailedDependency)
			return true
		}
		targetBranch = branch.branch
		options.BranchTimestamp = branch.timestamp
	}

	if targetOwner == "" || targetRepo == "" || targetBranch == "" {
		returnErrorPage(ctx, fasthttp.StatusBadRequest)
		return true
	}

	// Check if the browser has a cached version
	if ifModifiedSince, err := time.Parse(time.RFC1123, string(ctx.Request.Header.Peek("If-Modified-Since"))); err == nil {
		if !ifModifiedSince.Before(options.BranchTimestamp) {
			ctx.Response.SetStatusCode(fasthttp.StatusNotModified)
			return true
		}
	}
	log.Debug().Msg("preparations")

	// Make a GET request to the upstream URL
	uri := targetOwner + "/" + targetRepo + "/raw/" + targetBranch + "/" + targetPath
	var req *fasthttp.Request
	var res *fasthttp.Response
	var cachedResponse fileResponse
	var err error
	if cachedValue, ok := fileResponseCache.Get(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10)); ok && len(cachedValue.(fileResponse).body) > 0 {
		cachedResponse = cachedValue.(fileResponse)
	} else {
		req = fasthttp.AcquireRequest()
		req.SetRequestURI(string(GiteaRoot) + "/api/v1/repos/" + uri + "?access_token=" + GiteaApiToken)
		res = fasthttp.AcquireResponse()
		res.SetBodyStream(&strings.Reader{}, -1)
		err = upstreamClient.Do(req, res)
	}
	log.Debug().Msg("acquisition")

	// Handle errors
	if (res == nil && !cachedResponse.exists) || (res != nil && res.StatusCode() == fasthttp.StatusNotFound) {
		if options.TryIndexPages {
			// copy the options struct & try if an index page exists
			optionsForIndexPages := *options
			optionsForIndexPages.TryIndexPages = false
			optionsForIndexPages.AppendTrailingSlash = true
			for _, indexPage := range IndexPages {
				if upstream(ctx, targetOwner, targetRepo, targetBranch, strings.TrimSuffix(targetPath, "/")+"/"+indexPage, &optionsForIndexPages) {
					_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
						exists: false,
					}, FileCacheTimeout)
					return true
				}
			}
			// compatibility fix for GitHub Pages (/example → /example.html)
			optionsForIndexPages.AppendTrailingSlash = false
			optionsForIndexPages.RedirectIfExists = string(ctx.Request.URI().Path()) + ".html"
			if upstream(ctx, targetOwner, targetRepo, targetBranch, targetPath+".html", &optionsForIndexPages) {
				_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
					exists: false,
				}, FileCacheTimeout)
				return true
			}
		}
		ctx.Response.SetStatusCode(fasthttp.StatusNotFound)
		if res != nil {
			// Update cache if the request is fresh
			_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
				exists: false,
			}, FileCacheTimeout)
		}
		return false
	}
	if res != nil && (err != nil || res.StatusCode() != fasthttp.StatusOK) {
		fmt.Printf("Couldn't fetch contents from \"%s\": %s (status code %d)\n", req.RequestURI(), err, res.StatusCode())
		returnErrorPage(ctx, fasthttp.StatusInternalServerError)
		return true
	}

	// Append trailing slash if missing (for index files), and redirect to fix filenames in general
	// options.AppendTrailingSlash is only true when looking for index pages
	if options.AppendTrailingSlash && !bytes.HasSuffix(ctx.Request.URI().Path(), []byte{'/'}) {
		ctx.Redirect(string(ctx.Request.URI().Path())+"/", fasthttp.StatusTemporaryRedirect)
		return true
	}
	if bytes.HasSuffix(ctx.Request.URI().Path(), []byte("/index.html")) {
		ctx.Redirect(strings.TrimSuffix(string(ctx.Request.URI().Path()), "index.html"), fasthttp.StatusTemporaryRedirect)
		return true
	}
	if options.RedirectIfExists != "" {
		ctx.Redirect(options.RedirectIfExists, fasthttp.StatusTemporaryRedirect)
		return true
	}
	log.Debug().Msg("error handling")

	// Set the MIME type
	mimeType := mime.TypeByExtension(path.Ext(targetPath))
	mimeTypeSplit := strings.SplitN(mimeType, ";", 2)
	if _, ok := options.ForbiddenMimeTypes[mimeTypeSplit[0]]; ok || mimeType == "" {
		if options.DefaultMimeType != "" {
			mimeType = options.DefaultMimeType
		} else {
			mimeType = "application/octet-stream"
		}
	}
	ctx.Response.Header.SetContentType(mimeType)

	// Everything's okay so far
	ctx.Response.SetStatusCode(fasthttp.StatusOK)
	ctx.Response.Header.SetLastModified(options.BranchTimestamp)

	log.Debug().Msg("response preparations")

	// Write the response body to the original request
	var cacheBodyWriter bytes.Buffer
	if res != nil {
		if res.Header.ContentLength() > FileCacheSizeLimit {
			err = res.BodyWriteTo(ctx.Response.BodyWriter())
		} else {
			// TODO: cache is half-empty if request is cancelled - does the ctx.Err() below do the trick?
			err = res.BodyWriteTo(io.MultiWriter(ctx.Response.BodyWriter(), &cacheBodyWriter))
		}
	} else {
		_, err = ctx.Write(cachedResponse.body)
	}
	if err != nil {
		fmt.Printf("Couldn't write body for \"%s\": %s\n", req.RequestURI(), err)
		returnErrorPage(ctx, fasthttp.StatusInternalServerError)
		return true
	}
	log.Debug().Msg("response")

	if res != nil && ctx.Err() == nil {
		cachedResponse.exists = true
		cachedResponse.mimeType = mimeType
		cachedResponse.body = cacheBodyWriter.Bytes()
		_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), cachedResponse, FileCacheTimeout)
	}

	return true
}

// upstreamOptions provides various options for the upstream request.
type upstreamOptions struct {
	DefaultMimeType     string
	ForbiddenMimeTypes  map[string]struct{}
	TryIndexPages       bool
	AppendTrailingSlash bool
	RedirectIfExists    string
	BranchTimestamp     time.Time
}
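handler.go disappears from package main entirely. Judging by the call added in main.go further down, the same routing logic now lives in the server package behind a factory that closes over the injected settings, roughly along these lines (a sketch only: the parameter list is inferred from the main.go call site and the rest is not part of this diff):

package server

import "github.com/valyala/fasthttp"

// Sketch (inferred, not shown in this commit): a Handler factory that takes the
// former globals as parameters and returns the actual request handler.
func Handler(mainDomainSuffix, rawDomain, giteaRoot []byte, rawInfoPage, giteaApiToken string,
	blacklistedPaths, allowedCorsDomains [][]byte) fasthttp.RequestHandler {
	return func(ctx *fasthttp.RequestCtx) {
		// same routing as the deleted handler(), but reading the closed-over
		// parameters instead of package-level variables
	}
}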
html/html.go (new file; 6 lines)
@@ -0,0 +1,6 @@
package html

import _ "embed"

//go:embed 404.html
var NotFoundPage []byte
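html/html.go takes over the //go:embed of the 404 page that main.go used to do itself. Callers would then go through html.NotFoundPage; a hedged sketch of how the "%status" substitution from the deleted returnErrorPage could look against this package (the import path and the helper name are assumptions, not part of the diff):

package server

import (
	"bytes"

	"codeberg.org/codeberg/pages/html"
	"github.com/valyala/fasthttp"
)

// writeNotFound is a hypothetical helper: it fills the "%status" placeholder in
// the embedded 404 page, as the deleted returnErrorPage in handler.go did.
func writeNotFound(ctx *fasthttp.RequestCtx, status string) {
	ctx.Response.Header.SetContentType("text/html; charset=utf-8")
	ctx.Response.SetBody(bytes.ReplaceAll(html.NotFoundPage, []byte("%status"), []byte(status)))
}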
main.go (52 lines changed)
@@ -26,31 +26,28 @@ import (
 	"os"
 	"time"
 
-	_ "embed"
-
 	"github.com/valyala/fasthttp"
+
+	pages_server "codeberg.org/codeberg/pages/server"
 )
 
 // MainDomainSuffix specifies the main domain (starting with a dot) for which subdomains shall be served as static
 // pages, or used for comparison in CNAME lookups. Static pages can be accessed through
 // https://{owner}.{MainDomain}[/{repo}], with repo defaulting to "pages".
-var MainDomainSuffix = []byte("." + envOr("PAGES_DOMAIN", "codeberg.page"))
+var MainDomainSuffix = []byte("." + pages_server.EnvOr("PAGES_DOMAIN", "codeberg.page"))
 
 // GiteaRoot specifies the root URL of the Gitea instance, without a trailing slash.
-var GiteaRoot = []byte(envOr("GITEA_ROOT", "https://codeberg.org"))
+var GiteaRoot = []byte(pages_server.EnvOr("GITEA_ROOT", "https://codeberg.org"))
 
-var GiteaApiToken = envOr("GITEA_API_TOKEN", "")
+var GiteaApiToken = pages_server.EnvOr("GITEA_API_TOKEN", "")
 
-//go:embed 404.html
-var NotFoundPage []byte
-
 // RawDomain specifies the domain from which raw repository content shall be served in the following format:
 // https://{RawDomain}/{owner}/{repo}[/{branch|tag|commit}/{version}]/{filepath...}
 // (set to []byte(nil) to disable raw content hosting)
-var RawDomain = []byte(envOr("RAW_DOMAIN", "raw.codeberg.org"))
+var RawDomain = []byte(pages_server.EnvOr("RAW_DOMAIN", "raw.codeberg.org"))
 
 // RawInfoPage will be shown (with a redirect) when trying to access RawDomain directly (or without owner/repo/path).
-var RawInfoPage = envOr("REDIRECT_RAW_INFO", "https://docs.codeberg.org/pages/raw-content/")
+var RawInfoPage = pages_server.EnvOr("REDIRECT_RAW_INFO", "https://docs.codeberg.org/pages/raw-content/")
 
 // AllowedCorsDomains lists the domains for which Cross-Origin Resource Sharing is allowed.
 var AllowedCorsDomains = [][]byte{
@@ -64,11 +61,6 @@ var BlacklistedPaths = [][]byte{
 	[]byte("/.well-known/acme-challenge/"),
 }
 
-// IndexPages lists pages that may be considered as index pages for directories.
-var IndexPages = []string{
-	"index.html",
-}
-
 // main sets up and starts the web server.
 func main() {
 	// TODO: CLI Library
@@ -77,15 +69,15 @@ func main() {
 			println("--remove-certificate requires at least one domain as an argument")
 			os.Exit(1)
 		}
-		if keyDatabaseErr != nil {
-			panic(keyDatabaseErr)
+		if pages_server.KeyDatabaseErr != nil {
+			panic(pages_server.KeyDatabaseErr)
 		}
 		for _, domain := range os.Args[2:] {
-			if err := keyDatabase.Delete([]byte(domain)); err != nil {
+			if err := pages_server.KeyDatabase.Delete([]byte(domain)); err != nil {
 				panic(err)
 			}
 		}
-		if err := keyDatabase.Sync(); err != nil {
+		if err := pages_server.KeyDatabase.Sync(); err != nil {
 			panic(err)
 		}
 		os.Exit(0)
@@ -98,10 +90,13 @@ func main() {
 	GiteaRoot = bytes.TrimSuffix(GiteaRoot, []byte{'/'})
 
 	// Use HOST and PORT environment variables to determine listening address
-	address := fmt.Sprintf("%s:%s", envOr("HOST", "[::]"), envOr("PORT", "443"))
+	address := fmt.Sprintf("%s:%s", pages_server.EnvOr("HOST", "[::]"), pages_server.EnvOr("PORT", "443"))
 	log.Printf("Listening on https://%s", address)
 
-	// Enable compression by wrapping the handler() method with the compression function provided by FastHTTP
+	// Create handler based on settings
+	handler := pages_server.Handler(MainDomainSuffix, RawDomain, GiteaRoot, RawInfoPage, GiteaApiToken, BlacklistedPaths, AllowedCorsDomains)
+
+	// Enable compression by wrapping the handler with the compression function provided by FastHTTP
 	compressedHandler := fasthttp.CompressHandlerBrotliLevel(handler, fasthttp.CompressBrotliBestSpeed, fasthttp.CompressBestSpeed)
 
 	server := &fasthttp.Server{
@@ -120,15 +115,15 @@ func main() {
 	if err != nil {
 		log.Fatalf("Couldn't create listener: %s", err)
 	}
-	listener = tls.NewListener(listener, tlsConfig)
+	listener = tls.NewListener(listener, pages_server.TlsConfig(MainDomainSuffix, string(GiteaRoot), GiteaApiToken))
 
-	setupCertificates()
+	pages_server.SetupCertificates(MainDomainSuffix)
 	if os.Getenv("ENABLE_HTTP_SERVER") == "true" {
 		go (func() {
 			challengePath := []byte("/.well-known/acme-challenge/")
 			err := fasthttp.ListenAndServe("[::]:80", func(ctx *fasthttp.RequestCtx) {
 				if bytes.HasPrefix(ctx.Path(), challengePath) {
-					challenge, ok := challengeCache.Get(string(TrimHostPort(ctx.Host())) + "/" + string(bytes.TrimPrefix(ctx.Path(), challengePath)))
+					challenge, ok := pages_server.ChallengeCache.Get(string(pages_server.TrimHostPort(ctx.Host())) + "/" + string(bytes.TrimPrefix(ctx.Path(), challengePath)))
 					if !ok || challenge == nil {
 						ctx.SetStatusCode(http.StatusNotFound)
 						ctx.SetBodyString("no challenge for this token")
@@ -150,12 +145,3 @@ func main() {
 		log.Fatalf("Couldn't start server: %s", err)
 	}
 }
-
-// envOr reads an environment variable and returns a default value if it's empty.
-// TODO: to helpers.go or use CLI framework
-func envOr(env string, or string) string {
-	if v := os.Getenv(env); v != "" {
-		return v
-	}
-	return or
-}
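main.go loses its envOr helper; every call now goes through pages_server.EnvOr. The exported version is presumably just the removed function moved into the server package, roughly:

package server

import "os"

// EnvOr reads an environment variable and returns a default value if it's empty.
// (Assumed: the former envOr from main.go, now exported; the body is taken from
// the removed hunk above.)
func EnvOr(env string, or string) string {
	if v := os.Getenv(env); v != "" {
		return v
	}
	return or
}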
@@ -1,4 +1,4 @@
-package main
+package server
 
 import (
 	"bytes"
@@ -37,102 +37,104 @@ import (
 	"github.com/go-acme/lego/v4/registration"
 )
 
-// tlsConfig contains the configuration for generating, serving and cleaning up Let's Encrypt certificates.
-var tlsConfig = &tls.Config{
+// TlsConfig returns the configuration for generating, serving and cleaning up Let's Encrypt certificates.
+func TlsConfig(mainDomainSuffix []byte, giteaRoot, giteaApiToken string) *tls.Config {
+	return &tls.Config{
 		// check DNS name & get certificate from Let's Encrypt
 		GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
 			sni := strings.ToLower(strings.TrimSpace(info.ServerName))
 			sniBytes := []byte(sni)
 			if len(sni) < 1 {
 				return nil, errors.New("missing sni")
 			}
 
 			if info.SupportedProtos != nil {
 				for _, proto := range info.SupportedProtos {
 					if proto == tlsalpn01.ACMETLS1Protocol {
-						challenge, ok := challengeCache.Get(sni)
+						challenge, ok := ChallengeCache.Get(sni)
 						if !ok {
 							return nil, errors.New("no challenge for this domain")
 						}
 						cert, err := tlsalpn01.ChallengeCert(sni, challenge.(string))
 						if err != nil {
 							return nil, err
 						}
 						return cert, nil
 					}
 				}
 			}
 
 			targetOwner := ""
-			if bytes.HasSuffix(sniBytes, MainDomainSuffix) || bytes.Equal(sniBytes, MainDomainSuffix[1:]) {
+			if bytes.HasSuffix(sniBytes, mainDomainSuffix) || bytes.Equal(sniBytes, mainDomainSuffix[1:]) {
 				// deliver default certificate for the main domain (*.codeberg.page)
-				sniBytes = MainDomainSuffix
+				sniBytes = mainDomainSuffix
 				sni = string(sniBytes)
 			} else {
 				var targetRepo, targetBranch string
-				targetOwner, targetRepo, targetBranch = getTargetFromDNS(sni)
+				targetOwner, targetRepo, targetBranch = getTargetFromDNS(sni, string(mainDomainSuffix))
 				if targetOwner == "" {
 					// DNS not set up, return main certificate to redirect to the docs
-					sniBytes = MainDomainSuffix
+					sniBytes = mainDomainSuffix
 					sni = string(sniBytes)
 				} else {
 					_, _ = targetRepo, targetBranch
-					_, valid := checkCanonicalDomain(targetOwner, targetRepo, targetBranch, sni)
+					_, valid := checkCanonicalDomain(targetOwner, targetRepo, targetBranch, sni, string(mainDomainSuffix), giteaRoot, giteaApiToken)
 					if !valid {
-						sniBytes = MainDomainSuffix
+						sniBytes = mainDomainSuffix
 						sni = string(sniBytes)
 					}
 				}
 			}
 
 			if tlsCertificate, ok := keyCache.Get(sni); ok {
 				// we can use an existing certificate object
 				return tlsCertificate.(*tls.Certificate), nil
 			}
 
 			var tlsCertificate tls.Certificate
 			var err error
 			var ok bool
-			if tlsCertificate, ok = retrieveCertFromDB(sniBytes); !ok {
+			if tlsCertificate, ok = retrieveCertFromDB(sniBytes, mainDomainSuffix); !ok {
 				// request a new certificate
-				if bytes.Equal(sniBytes, MainDomainSuffix) {
+				if bytes.Equal(sniBytes, mainDomainSuffix) {
 					return nil, errors.New("won't request certificate for main domain, something really bad has happened")
 				}
 
-				tlsCertificate, err = obtainCert(acmeClient, []string{sni}, nil, targetOwner)
+				tlsCertificate, err = obtainCert(acmeClient, []string{sni}, nil, targetOwner, mainDomainSuffix)
 				if err != nil {
 					return nil, err
 				}
 			}
 
 			err = keyCache.Set(sni, &tlsCertificate, 15*time.Minute)
 			if err != nil {
 				panic(err)
 			}
 			return &tlsCertificate, nil
 		},
 		PreferServerCipherSuites: true,
 		NextProtos: []string{
 			"http/1.1",
 			tlsalpn01.ACMETLS1Protocol,
 		},
 
 		// generated 2021-07-13, Mozilla Guideline v5.6, Go 1.14.4, intermediate configuration
 		// https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6
 		MinVersion: tls.VersionTLS12,
 		CipherSuites: []uint16{
 			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
 			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
 			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
 			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
 			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
 			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
 		},
 	}
+}
 
 // TODO: clean up & move to init
 var keyCache = mcache.New()
-var keyDatabase, keyDatabaseErr = pogreb.Open("key-database.pogreb", &pogreb.Options{
+var KeyDatabase, KeyDatabaseErr = pogreb.Open("key-database.pogreb", &pogreb.Options{
 	BackgroundSyncInterval:       30 * time.Second,
 	BackgroundCompactionInterval: 6 * time.Hour,
 	FileSystem:                   fs.OSMMap,
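Turning the package-level tlsConfig variable into a TlsConfig constructor means the GetCertificate callback now captures mainDomainSuffix, giteaRoot and giteaApiToken from the function arguments rather than reaching for globals. The caller-side wiring is visible in the main.go hunk above:

	listener = tls.NewListener(listener, pages_server.TlsConfig(MainDomainSuffix, string(GiteaRoot), GiteaApiToken))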
@@ -181,17 +183,17 @@ var acmeClientOrderLimit = equalizer.NewTokenBucket(25, 15*time.Minute)
 // rate limit is 20 / second, we want 5 / second (especially as one cert takes at least two requests)
 var acmeClientRequestLimit = equalizer.NewTokenBucket(5, 1*time.Second)
 
-var challengeCache = mcache.New()
+var ChallengeCache = mcache.New()
 
 type AcmeTLSChallengeProvider struct{}
 
 var _ challenge.Provider = AcmeTLSChallengeProvider{}
 
 func (a AcmeTLSChallengeProvider) Present(domain, _, keyAuth string) error {
-	return challengeCache.Set(domain, keyAuth, 1*time.Hour)
+	return ChallengeCache.Set(domain, keyAuth, 1*time.Hour)
 }
 func (a AcmeTLSChallengeProvider) CleanUp(domain, _, _ string) error {
-	challengeCache.Remove(domain)
+	ChallengeCache.Remove(domain)
 	return nil
 }
 
@@ -200,17 +202,17 @@ type AcmeHTTPChallengeProvider struct{}
 var _ challenge.Provider = AcmeHTTPChallengeProvider{}
 
 func (a AcmeHTTPChallengeProvider) Present(domain, token, keyAuth string) error {
-	return challengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
+	return ChallengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
 }
 func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
-	challengeCache.Remove(domain + "/" + token)
+	ChallengeCache.Remove(domain + "/" + token)
 	return nil
 }
 
-func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
+func retrieveCertFromDB(sni, mainDomainSuffix []byte) (tls.Certificate, bool) {
 	// parse certificate from database
 	res := &certificate.Resource{}
-	if !PogrebGet(keyDatabase, sni, res) {
+	if !PogrebGet(KeyDatabase, sni, res) {
 		return tls.Certificate{}, false
 	}
 
@@ -220,7 +222,7 @@ func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
 	}
 
 	// TODO: document & put into own function
-	if !bytes.Equal(sni, MainDomainSuffix) {
+	if !bytes.Equal(sni, mainDomainSuffix) {
 		tlsCertificate.Leaf, err = x509.ParseCertificate(tlsCertificate.Certificate[0])
 		if err != nil {
 			panic(err)
@@ -238,7 +240,7 @@ func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
 		}
 		go (func() {
 			res.CSR = nil // acme client doesn't like CSR to be set
-			tlsCertificate, err = obtainCert(acmeClient, []string{string(sni)}, res, "")
+			tlsCertificate, err = obtainCert(acmeClient, []string{string(sni)}, res, "", mainDomainSuffix)
 			if err != nil {
 				log.Printf("Couldn't renew certificate for %s: %s", sni, err)
 			}
@@ -251,7 +253,7 @@ func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
 
 var obtainLocks = sync.Map{}
 
-func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Resource, user string) (tls.Certificate, error) {
+func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Resource, user string, mainDomainSuffix []byte) (tls.Certificate, error) {
 	name := strings.TrimPrefix(domains[0], "*")
 	if os.Getenv("DNS_PROVIDER") == "" && len(domains[0]) > 0 && domains[0][0] == '*' {
 		domains = domains[1:]
@@ -264,7 +266,7 @@ func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Re
 			time.Sleep(100 * time.Millisecond)
 			_, working = obtainLocks.Load(name)
 		}
-		cert, ok := retrieveCertFromDB([]byte(name))
+		cert, ok := retrieveCertFromDB([]byte(name), mainDomainSuffix)
 		if !ok {
 			return tls.Certificate{}, errors.New("certificate failed in synchronous request")
 		}
@@ -273,7 +275,7 @@ func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Re
 	defer obtainLocks.Delete(name)
 
 	if acmeClient == nil {
-		return mockCert(domains[0], "ACME client uninitialized. This is a server error, please report!"), nil
+		return mockCert(domains[0], "ACME client uninitialized. This is a server error, please report!", string(mainDomainSuffix)), nil
 	}
 
 	// request actual cert
@@ -315,15 +317,15 @@ func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Re
 			if err == nil && tlsCertificate.Leaf.NotAfter.After(time.Now()) {
 				// avoid sending a mock cert instead of a still valid cert, instead abuse CSR field to store time to try again at
 				renew.CSR = []byte(strconv.FormatInt(time.Now().Add(6*time.Hour).Unix(), 10))
-				PogrebPut(keyDatabase, []byte(name), renew)
+				PogrebPut(KeyDatabase, []byte(name), renew)
 				return tlsCertificate, nil
 			}
 		}
-		return mockCert(domains[0], err.Error()), err
+		return mockCert(domains[0], err.Error(), string(mainDomainSuffix)), err
 	}
 	log.Printf("Obtained certificate for %v", domains)
 
-	PogrebPut(keyDatabase, []byte(name), res)
+	PogrebPut(KeyDatabase, []byte(name), res)
 	tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
 	if err != nil {
 		return tls.Certificate{}, err
@@ -331,7 +333,7 @@ func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Re
 	return tlsCertificate, nil
 }
 
-func mockCert(domain string, msg string) tls.Certificate {
+func mockCert(domain, msg, mainDomainSuffix string) tls.Certificate {
 	key, err := certcrypto.GeneratePrivateKey(certcrypto.RSA2048)
 	if err != nil {
 		panic(err)
@@ -385,10 +387,10 @@ func mockCert(domain string, msg string) tls.Certificate {
 		Domain: domain,
 	}
 	databaseName := domain
-	if domain == "*"+string(MainDomainSuffix) || domain == string(MainDomainSuffix[1:]) {
-		databaseName = string(MainDomainSuffix)
+	if domain == "*"+mainDomainSuffix || domain == mainDomainSuffix[1:] {
+		databaseName = mainDomainSuffix
 	}
-	PogrebPut(keyDatabase, []byte(databaseName), res)
+	PogrebPut(KeyDatabase, []byte(databaseName), res)
 
 	tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
 	if err != nil {
@@ -397,9 +399,9 @@ func mockCert(domain string, msg string) tls.Certificate {
 	return tlsCertificate
 }
 
-func setupCertificates() {
-	if keyDatabaseErr != nil {
-		panic(keyDatabaseErr)
+func SetupCertificates(mainDomainSuffix []byte) {
+	if KeyDatabaseErr != nil {
+		panic(KeyDatabaseErr)
 	}
 
 	if os.Getenv("ACME_ACCEPT_TERMS") != "true" || (os.Getenv("DNS_PROVIDER") == "" && os.Getenv("ACME_API") != "https://acme.mock.directory") {
@@ -407,7 +409,7 @@ func setupCertificates() {
 	}
 
 	// getting main cert before ACME account so that we can panic here on database failure without hitting rate limits
-	mainCertBytes, err := keyDatabase.Get(MainDomainSuffix)
+	mainCertBytes, err := KeyDatabase.Get(mainDomainSuffix)
 	if err != nil {
 		// key database is not working
 		panic(err)
@@ -423,7 +425,7 @@ func setupCertificates() {
 			panic(err)
 		}
 		myAcmeConfig = lego.NewConfig(&myAcmeAccount)
-		myAcmeConfig.CADirURL = envOr("ACME_API", "https://acme-v02.api.letsencrypt.org/directory")
+		myAcmeConfig.CADirURL = EnvOr("ACME_API", "https://acme-v02.api.letsencrypt.org/directory")
 		myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
 		_, err := lego.NewClient(myAcmeConfig)
 		if err != nil {
@@ -435,12 +437,12 @@ func setupCertificates() {
 			panic(err)
 		}
 		myAcmeAccount = AcmeAccount{
-			Email:  envOr("ACME_EMAIL", "noreply@example.email"),
+			Email:  EnvOr("ACME_EMAIL", "noreply@example.email"),
 			Key:    privateKey,
 			KeyPEM: string(certcrypto.PEMEncode(privateKey)),
 		}
 		myAcmeConfig = lego.NewConfig(&myAcmeAccount)
-		myAcmeConfig.CADirURL = envOr("ACME_API", "https://acme-v02.api.letsencrypt.org/directory")
+		myAcmeConfig.CADirURL = EnvOr("ACME_API", "https://acme-v02.api.letsencrypt.org/directory")
 		myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
 		tempClient, err := lego.NewClient(myAcmeConfig)
 		if err != nil {
@@ -523,7 +525,7 @@ func setupCertificates() {
 	}
 
 	if mainCertBytes == nil {
-		_, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(MainDomainSuffix), string(MainDomainSuffix[1:])}, nil, "")
+		_, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(mainDomainSuffix), string(mainDomainSuffix[1:])}, nil, "", mainDomainSuffix)
 		if err != nil {
 			log.Printf("[ERROR] Couldn't renew main domain certificate, continuing with mock certs only: %s", err)
 		}
@@ -531,7 +533,7 @@ func setupCertificates() {
 
 	go (func() {
 		for {
-			err := keyDatabase.Sync()
+			err := KeyDatabase.Sync()
 			if err != nil {
 				log.Printf("[ERROR] Syncing key database failed: %s", err)
 			}
@@ -544,10 +546,10 @@ func setupCertificates() {
 			// clean up expired certs
 			now := time.Now()
 			expiredCertCount := 0
-			keyDatabaseIterator := keyDatabase.Items()
+			keyDatabaseIterator := KeyDatabase.Items()
 			key, resBytes, err := keyDatabaseIterator.Next()
 			for err == nil {
-				if !bytes.Equal(key, MainDomainSuffix) {
+				if !bytes.Equal(key, mainDomainSuffix) {
 					resGob := bytes.NewBuffer(resBytes)
 					resDec := gob.NewDecoder(resGob)
 					res := &certificate.Resource{}
@@ -558,7 +560,7 @@ func setupCertificates() {
 
 					tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
 					if err != nil || !tlsCertificates[0].NotAfter.After(now) {
-						err := keyDatabase.Delete(key)
+						err := KeyDatabase.Delete(key)
 						if err != nil {
 							log.Printf("[ERROR] Deleting expired certificate for %s failed: %s", string(key), err)
 						} else {
@@ -571,7 +573,7 @@ func setupCertificates() {
 				log.Printf("[INFO] Removed %d expired certificates from the database", expiredCertCount)
 
 				// compact the database
-				result, err := keyDatabase.Compact()
+				result, err := KeyDatabase.Compact()
 				if err != nil {
|
||||||
log.Printf("[ERROR] Compacting key database failed: %s", err)
|
log.Printf("[ERROR] Compacting key database failed: %s", err)
|
||||||
} else {
|
} else {
|
||||||
@ -580,7 +582,7 @@ func setupCertificates() {
|
|||||||
|
|
||||||
// update main cert
|
// update main cert
|
||||||
res := &certificate.Resource{}
|
res := &certificate.Resource{}
|
||||||
if !PogrebGet(keyDatabase, MainDomainSuffix, res) {
|
if !PogrebGet(KeyDatabase, mainDomainSuffix, res) {
|
||||||
log.Printf("[ERROR] Couldn't renew certificate for main domain: %s", "expected main domain cert to exist, but it's missing - seems like the database is corrupted")
|
log.Printf("[ERROR] Couldn't renew certificate for main domain: %s", "expected main domain cert to exist, but it's missing - seems like the database is corrupted")
|
||||||
} else {
|
} else {
|
||||||
tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
|
tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
|
||||||
@ -588,7 +590,7 @@ func setupCertificates() {
|
|||||||
// renew main certificate 30 days before it expires
|
// renew main certificate 30 days before it expires
|
||||||
if !tlsCertificates[0].NotAfter.After(time.Now().Add(-30 * 24 * time.Hour)) {
|
if !tlsCertificates[0].NotAfter.After(time.Now().Add(-30 * 24 * time.Hour)) {
|
||||||
go (func() {
|
go (func() {
|
||||||
_, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(MainDomainSuffix), string(MainDomainSuffix[1:])}, res, "")
|
_, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(mainDomainSuffix), string(mainDomainSuffix[1:])}, res, "", mainDomainSuffix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("[ERROR] Couldn't renew certificate for main domain: %s", err)
|
log.Printf("[ERROR] Couldn't renew certificate for main domain: %s", err)
|
||||||
}
|
}
|
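Note: SetupCertificates is now exported and receives the main domain suffix explicitly instead of reading a package-level MainDomainSuffix. A minimal sketch of the call site under that assumption (the environment variable name and caller layout are illustrative, not part of this commit):

    // hypothetical caller, e.g. in the main package
    mainDomainSuffix := []byte(server.EnvOr("PAGES_DOMAIN", ".codeberg.page"))
    server.SetupCertificates(mainDomainSuffix)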
@@ -1,4 +1,4 @@
-package main
+package server

 import (
     "github.com/OrlovEvgeny/go-mcache"
@@ -16,7 +16,7 @@ var dnsLookupCache = mcache.New()

 // getTargetFromDNS searches for CNAME or TXT entries on the request domain ending with MainDomainSuffix.
 // If everything is fine, it returns the target data.
-func getTargetFromDNS(domain string) (targetOwner, targetRepo, targetBranch string) {
+func getTargetFromDNS(domain, mainDomainSuffix string) (targetOwner, targetRepo, targetBranch string) {
     // Get CNAME or TXT
     var cname string
     var err error
@@ -25,14 +25,14 @@ func getTargetFromDNS(domain string) (targetOwner, targetRepo, targetBranch stri
     } else {
         cname, err = net.LookupCNAME(domain)
         cname = strings.TrimSuffix(cname, ".")
-        if err != nil || !strings.HasSuffix(cname, string(MainDomainSuffix)) {
+        if err != nil || !strings.HasSuffix(cname, mainDomainSuffix) {
             cname = ""
             // TODO: check if the A record matches!
             names, err := net.LookupTXT(domain)
             if err == nil {
                 for _, name := range names {
                     name = strings.TrimSuffix(name, ".")
-                    if strings.HasSuffix(name, string(MainDomainSuffix)) {
+                    if strings.HasSuffix(name, mainDomainSuffix) {
                         cname = name
                         break
                     }
@@ -44,7 +44,7 @@ func getTargetFromDNS(domain string) (targetOwner, targetRepo, targetBranch stri
     if cname == "" {
         return
     }
-    cnameParts := strings.Split(strings.TrimSuffix(cname, string(MainDomainSuffix)), ".")
+    cnameParts := strings.Split(strings.TrimSuffix(cname, mainDomainSuffix), ".")
     targetOwner = cnameParts[len(cnameParts)-1]
     if len(cnameParts) > 1 {
         targetRepo = cnameParts[len(cnameParts)-2]
@@ -69,7 +69,7 @@ var CanonicalDomainCacheTimeout = 15 * time.Minute
 var canonicalDomainCache = mcache.New()

 // checkCanonicalDomain returns the canonical domain specified in the repo (using the file `.canonical-domain`).
-func checkCanonicalDomain(targetOwner, targetRepo, targetBranch, actualDomain string) (canonicalDomain string, valid bool) {
+func checkCanonicalDomain(targetOwner, targetRepo, targetBranch, actualDomain, mainDomainSuffix, giteaRoot, giteaApiToken string) (canonicalDomain string, valid bool) {
     domains := []string{}
     if cachedValue, ok := canonicalDomainCache.Get(targetOwner + "/" + targetRepo + "/" + targetBranch); ok {
         domains = cachedValue.([]string)
@@ -81,7 +81,7 @@ func checkCanonicalDomain(targetOwner, targetRepo, targetBranch, actualDomain st
         }
     } else {
         req := fasthttp.AcquireRequest()
-        req.SetRequestURI(string(GiteaRoot) + "/api/v1/repos/" + targetOwner + "/" + targetRepo + "/raw/" + targetBranch + "/.domains" + "?access_token=" + GiteaApiToken)
+        req.SetRequestURI(giteaRoot + "/api/v1/repos/" + targetOwner + "/" + targetRepo + "/raw/" + targetBranch + "/.domains" + "?access_token=" + giteaApiToken)
         res := fasthttp.AcquireResponse()

         err := upstreamClient.Do(req, res)
@@ -99,7 +99,7 @@ func checkCanonicalDomain(targetOwner, targetRepo, targetBranch, actualDomain st
             }
         }
     }
-    domains = append(domains, targetOwner+string(MainDomainSuffix))
+    domains = append(domains, targetOwner+mainDomainSuffix)
     if domains[len(domains)-1] == actualDomain {
         valid = true
     }
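Note: getTargetFromDNS now takes mainDomainSuffix as a plain string, so the suffix is trimmed and the remainder is split right-to-left into owner and repo. A small standalone illustration of that split (example domain values only, not part of the commit):

    // CNAME target "myrepo.someuser.codeberg.page" with suffix ".codeberg.page"
    parts := strings.Split(strings.TrimSuffix("myrepo.someuser.codeberg.page", ".codeberg.page"), ".")
    owner := parts[len(parts)-1] // "someuser"
    repo := parts[len(parts)-2]  // "myrepo"; a bare "someuser.codeberg.page" leaves repo unset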
server/handler.go | 553 lines (new file)
@@ -0,0 +1,553 @@
package server

import (
    "bytes"
    "fmt"
    "io"
    "mime"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/OrlovEvgeny/go-mcache"
    "github.com/rs/zerolog/log"
    "github.com/valyala/fasthttp"
    "github.com/valyala/fastjson"

    "codeberg.org/codeberg/pages/html"
)

// Handler handles a single HTTP request to the web server.
func Handler(mainDomainSuffix, rawDomain, giteaRoot []byte, rawInfoPage, giteaApiToken string, blacklistedPaths, allowedCorsDomains [][]byte) func(ctx *fasthttp.RequestCtx) {
    return func(ctx *fasthttp.RequestCtx) {
        log := log.With().Str("Handler", string(ctx.Request.Header.RequestURI())).Logger()

        ctx.Response.Header.Set("Server", "Codeberg Pages")

        // Force new default from specification (since November 2020) - see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#strict-origin-when-cross-origin
        ctx.Response.Header.Set("Referrer-Policy", "strict-origin-when-cross-origin")

        // Enable browser caching for up to 10 minutes
        ctx.Response.Header.Set("Cache-Control", "public, max-age=600")

        trimmedHost := TrimHostPort(ctx.Request.Host())

        // Add HSTS for RawDomain and MainDomainSuffix
        if hsts := GetHSTSHeader(trimmedHost, mainDomainSuffix, rawDomain); hsts != "" {
            ctx.Response.Header.Set("Strict-Transport-Security", hsts)
        }

        // Block all methods not required for static pages
        if !ctx.IsGet() && !ctx.IsHead() && !ctx.IsOptions() {
            ctx.Response.Header.Set("Allow", "GET, HEAD, OPTIONS")
            ctx.Error("Method not allowed", fasthttp.StatusMethodNotAllowed)
            return
        }

        // Block blacklisted paths (like ACME challenges)
        for _, blacklistedPath := range blacklistedPaths {
            if bytes.HasPrefix(ctx.Path(), blacklistedPath) {
                returnErrorPage(ctx, fasthttp.StatusForbidden)
                return
            }
        }

        // Allow CORS for specified domains
        if ctx.IsOptions() {
            allowCors := false
            for _, allowedCorsDomain := range allowedCorsDomains {
                if bytes.Equal(trimmedHost, allowedCorsDomain) {
                    allowCors = true
                    break
                }
            }
            if allowCors {
                ctx.Response.Header.Set("Access-Control-Allow-Origin", "*")
                ctx.Response.Header.Set("Access-Control-Allow-Methods", "GET, HEAD")
            }
            ctx.Response.Header.Set("Allow", "GET, HEAD, OPTIONS")
            ctx.Response.Header.SetStatusCode(fasthttp.StatusNoContent)
            return
        }

        // Prepare request information to Gitea
        var targetOwner, targetRepo, targetBranch, targetPath string
        var targetOptions = &upstreamOptions{
            ForbiddenMimeTypes: map[string]struct{}{},
            TryIndexPages:      true,
        }

        // tryBranch checks if a branch exists and populates the target variables. If canonicalLink is non-empty, it will
        // also disallow search indexing and add a Link header to the canonical URL.
        var tryBranch = func(repo string, branch string, path []string, canonicalLink string) bool {
            if repo == "" {
                return false
            }

            // Check if the branch exists, otherwise treat it as a file path
            branchTimestampResult := getBranchTimestamp(targetOwner, repo, branch, string(giteaRoot), giteaApiToken)
            if branchTimestampResult == nil {
                // branch doesn't exist
                return false
            }

            // Branch exists, use it
            targetRepo = repo
            targetPath = strings.Trim(strings.Join(path, "/"), "/")
            targetBranch = branchTimestampResult.branch

            targetOptions.BranchTimestamp = branchTimestampResult.timestamp

            if canonicalLink != "" {
                // Hide from search machines & add canonical link
                ctx.Response.Header.Set("X-Robots-Tag", "noarchive, noindex")
                ctx.Response.Header.Set("Link",
                    strings.NewReplacer("%b", targetBranch, "%p", targetPath).Replace(canonicalLink)+
                        "; rel=\"canonical\"",
                )
            }

            return true
        }

        // tryUpstream forwards the target request to the Gitea API, and shows an error page on failure.
        var tryUpstream = func() {
            // check if a canonical domain exists on a request on MainDomain
            if bytes.HasSuffix(trimmedHost, mainDomainSuffix) {
                canonicalDomain, _ := checkCanonicalDomain(targetOwner, targetRepo, targetBranch, "", string(mainDomainSuffix), string(giteaRoot), giteaApiToken)
                if !strings.HasSuffix(strings.SplitN(canonicalDomain, "/", 2)[0], string(mainDomainSuffix)) {
                    canonicalPath := string(ctx.RequestURI())
                    if targetRepo != "pages" {
                        canonicalPath = "/" + strings.SplitN(canonicalPath, "/", 3)[2]
                    }
                    ctx.Redirect("https://"+canonicalDomain+canonicalPath, fasthttp.StatusTemporaryRedirect)
                    return
                }
            }

            // Try to request the file from the Gitea API
            if !upstream(ctx, targetOwner, targetRepo, targetBranch, targetPath, string(giteaRoot), giteaApiToken, targetOptions) {
                returnErrorPage(ctx, ctx.Response.StatusCode())
            }
        }

        log.Debug().Msg("preparations")

        if rawDomain != nil && bytes.Equal(trimmedHost, rawDomain) {
            // Serve raw content from RawDomain
            log.Debug().Msg("raw domain")

            targetOptions.TryIndexPages = false
            targetOptions.ForbiddenMimeTypes["text/html"] = struct{}{}
            targetOptions.DefaultMimeType = "text/plain; charset=utf-8"

            pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
            if len(pathElements) < 2 {
                // https://{RawDomain}/{owner}/{repo}[/@{branch}]/{path} is required
                ctx.Redirect(rawInfoPage, fasthttp.StatusTemporaryRedirect)
                return
            }
            targetOwner = pathElements[0]
            targetRepo = pathElements[1]

            // raw.codeberg.org/example/myrepo/@main/index.html
            if len(pathElements) > 2 && strings.HasPrefix(pathElements[2], "@") {
                log.Debug().Msg("raw domain preparations, now trying with specified branch")
                if tryBranch(targetRepo, pathElements[2][1:], pathElements[3:],
                    string(giteaRoot)+"/"+targetOwner+"/"+targetRepo+"/src/branch/%b/%p",
                ) {
                    log.Debug().Msg("tryBranch, now trying upstream")
                    tryUpstream()
                    return
                }
                log.Debug().Msg("missing branch")
                returnErrorPage(ctx, fasthttp.StatusFailedDependency)
                return
            } else {
                log.Debug().Msg("raw domain preparations, now trying with default branch")
                tryBranch(targetRepo, "", pathElements[2:],
                    string(giteaRoot)+"/"+targetOwner+"/"+targetRepo+"/src/branch/%b/%p",
                )
                log.Debug().Msg("tryBranch, now trying upstream")
                tryUpstream()
                return
            }

        } else if bytes.HasSuffix(trimmedHost, mainDomainSuffix) {
            // Serve pages from subdomains of MainDomainSuffix
            log.Debug().Msg("main domain suffix")

            pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
            targetOwner = string(bytes.TrimSuffix(trimmedHost, mainDomainSuffix))
            targetRepo = pathElements[0]
            targetPath = strings.Trim(strings.Join(pathElements[1:], "/"), "/")

            if targetOwner == "www" {
                // www.codeberg.page redirects to codeberg.page
                ctx.Redirect("https://"+string(mainDomainSuffix[1:])+string(ctx.Path()), fasthttp.StatusPermanentRedirect)
                return
            }

            // Check if the first directory is a repo with the second directory as a branch
            // example.codeberg.page/myrepo/@main/index.html
            if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
                if targetRepo == "pages" {
                    // example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
                    ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), fasthttp.StatusTemporaryRedirect)
                    return
                }

                log.Debug().Msg("main domain preparations, now trying with specified repo & branch")
                if tryBranch(pathElements[0], pathElements[1][1:], pathElements[2:],
                    "/"+pathElements[0]+"/%p",
                ) {
                    log.Debug().Msg("tryBranch, now trying upstream")
                    tryUpstream()
                } else {
                    returnErrorPage(ctx, fasthttp.StatusFailedDependency)
                }
                return
            }

            // Check if the first directory is a branch for the "pages" repo
            // example.codeberg.page/@main/index.html
            if strings.HasPrefix(pathElements[0], "@") {
                log.Debug().Msg("main domain preparations, now trying with specified branch")
                if tryBranch("pages", pathElements[0][1:], pathElements[1:], "/%p") {
                    log.Debug().Msg("tryBranch, now trying upstream")
                    tryUpstream()
                } else {
                    returnErrorPage(ctx, fasthttp.StatusFailedDependency)
                }
                return
            }

            // Check if the first directory is a repo with a "pages" branch
            // example.codeberg.page/myrepo/index.html
            // example.codeberg.page/pages/... is not allowed here.
            log.Debug().Msg("main domain preparations, now trying with specified repo")
            if pathElements[0] != "pages" && tryBranch(pathElements[0], "pages", pathElements[1:], "") {
                log.Debug().Msg("tryBranch, now trying upstream")
                tryUpstream()
                return
            }

            // Try to use the "pages" repo on its default branch
            // example.codeberg.page/index.html
            log.Debug().Msg("main domain preparations, now trying with default repo/branch")
            if tryBranch("pages", "", pathElements, "") {
                log.Debug().Msg("tryBranch, now trying upstream")
                tryUpstream()
                return
            }

            // Couldn't find a valid repo/branch
            returnErrorPage(ctx, fasthttp.StatusFailedDependency)
            return
        } else {
            trimmedHostStr := string(trimmedHost)

            // Serve pages from external domains
            targetOwner, targetRepo, targetBranch = getTargetFromDNS(trimmedHostStr, string(mainDomainSuffix))
            if targetOwner == "" {
                returnErrorPage(ctx, fasthttp.StatusFailedDependency)
                return
            }

            pathElements := strings.Split(string(bytes.Trim(ctx.Request.URI().Path(), "/")), "/")
            canonicalLink := ""
            if strings.HasPrefix(pathElements[0], "@") {
                targetBranch = pathElements[0][1:]
                pathElements = pathElements[1:]
                canonicalLink = "/%p"
            }

            // Try to use the given repo on the given branch or the default branch
            log.Debug().Msg("custom domain preparations, now trying with details from DNS")
            if tryBranch(targetRepo, targetBranch, pathElements, canonicalLink) {
                canonicalDomain, valid := checkCanonicalDomain(targetOwner, targetRepo, targetBranch, trimmedHostStr, string(mainDomainSuffix), string(giteaRoot), giteaApiToken)
                if !valid {
                    returnErrorPage(ctx, fasthttp.StatusMisdirectedRequest)
                    return
                } else if canonicalDomain != trimmedHostStr {
                    // only redirect if the target is also a codeberg page!
                    targetOwner, _, _ = getTargetFromDNS(strings.SplitN(canonicalDomain, "/", 2)[0], string(mainDomainSuffix))
                    if targetOwner != "" {
                        ctx.Redirect("https://"+canonicalDomain+string(ctx.RequestURI()), fasthttp.StatusTemporaryRedirect)
                        return
                    } else {
                        returnErrorPage(ctx, fasthttp.StatusFailedDependency)
                        return
                    }
                }

                log.Debug().Msg("tryBranch, now trying upstream")
                tryUpstream()
                return
            } else {
                returnErrorPage(ctx, fasthttp.StatusFailedDependency)
                return
            }
        }
    }
}
// returnErrorPage sets the response status code and writes NotFoundPage to the response body, with "%status" replaced
|
||||||
|
// with the provided status code.
|
||||||
|
func returnErrorPage(ctx *fasthttp.RequestCtx, code int) {
|
||||||
|
ctx.Response.SetStatusCode(code)
|
||||||
|
ctx.Response.Header.SetContentType("text/html; charset=utf-8")
|
||||||
|
message := fasthttp.StatusMessage(code)
|
||||||
|
if code == fasthttp.StatusMisdirectedRequest {
|
||||||
|
message += " - domain not specified in <code>.domains</code> file"
|
||||||
|
}
|
||||||
|
if code == fasthttp.StatusFailedDependency {
|
||||||
|
message += " - target repo/branch doesn't exist or is private"
|
||||||
|
}
|
||||||
|
// TODO: use template engine?
|
||||||
|
ctx.Response.SetBody(bytes.ReplaceAll(html.NotFoundPage, []byte("%status"), []byte(strconv.Itoa(code)+" "+message)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
|
||||||
|
var DefaultBranchCacheTimeout = 15 * time.Minute
|
||||||
|
|
||||||
|
// BranchExistanceCacheTimeout specifies the timeout for the branch timestamp & existance cache. It should be shorter
|
||||||
|
// than FileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
|
||||||
|
// picked up faster, while still allowing the content to be cached longer if nothing changes.
|
||||||
|
var BranchExistanceCacheTimeout = 5 * time.Minute
|
||||||
|
|
||||||
|
// branchTimestampCache stores branch timestamps for faster cache checking
|
||||||
|
var branchTimestampCache = mcache.New()
|
||||||
|
|
||||||
|
type branchTimestamp struct {
|
||||||
|
branch string
|
||||||
|
timestamp time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
|
||||||
|
// on your available memory.
|
||||||
|
var FileCacheTimeout = 5 * time.Minute
|
||||||
|
|
||||||
|
// FileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
|
||||||
|
var FileCacheSizeLimit = 1024 * 1024
|
||||||
|
|
||||||
|
// fileResponseCache stores responses from the Gitea server
|
||||||
|
// TODO: make this an MRU cache with a size limit
|
||||||
|
var fileResponseCache = mcache.New()
|
||||||
|
|
||||||
|
type fileResponse struct {
|
||||||
|
exists bool
|
||||||
|
mimeType string
|
||||||
|
body []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBranchTimestamp finds the default branch (if branch is "") and returns the last modification time of the branch
|
||||||
|
// (or nil if the branch doesn't exist)
|
||||||
|
func getBranchTimestamp(owner, repo, branch, giteaRoot, giteaApiToken string) *branchTimestamp {
|
||||||
|
if result, ok := branchTimestampCache.Get(owner + "/" + repo + "/" + branch); ok {
|
||||||
|
if result == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return result.(*branchTimestamp)
|
||||||
|
}
|
||||||
|
result := &branchTimestamp{}
|
||||||
|
result.branch = branch
|
||||||
|
if branch == "" {
|
||||||
|
// Get default branch
|
||||||
|
var body = make([]byte, 0)
|
||||||
|
// TODO: use header for API key?
|
||||||
|
status, body, err := fasthttp.GetTimeout(body, giteaRoot+"/api/v1/repos/"+owner+"/"+repo+"?access_token="+giteaApiToken, 5*time.Second)
|
||||||
|
if err != nil || status != 200 {
|
||||||
|
_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, nil, DefaultBranchCacheTimeout)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
result.branch = fastjson.GetString(body, "default_branch")
|
||||||
|
}
|
||||||
|
|
||||||
|
var body = make([]byte, 0)
|
||||||
|
status, body, err := fasthttp.GetTimeout(body, giteaRoot+"/api/v1/repos/"+owner+"/"+repo+"/branches/"+branch+"?access_token="+giteaApiToken, 5*time.Second)
|
||||||
|
if err != nil || status != 200 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
result.timestamp, _ = time.Parse(time.RFC3339, fastjson.GetString(body, "commit", "timestamp"))
|
||||||
|
_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, result, BranchExistanceCacheTimeout)
|
||||||
|
return result
|
||||||
|
}
|
||||||
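// --- reviewer note (not part of handler.go) --------------------------------
// getBranchTimestamp resolves the default branch first (GET /api/v1/repos/{owner}/{repo}),
// then fetches the branch info (GET /api/v1/repos/{owner}/{repo}/branches/{branch});
// a failed default-branch lookup is cached as nil so missing repos don't hit the API
// again for DefaultBranchCacheTimeout. Rough shape of a lookup (example arguments):
//
//	if ts := getBranchTimestamp("someuser", "pages", "", "https://codeberg.org", ""); ts != nil {
//		fmt.Println(ts.branch, ts.timestamp) // repeated calls within 5 min come from branchTimestampCache
//	}
// ----------------------------------------------------------------------------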
var upstreamClient = fasthttp.Client{
    ReadTimeout:        10 * time.Second,
    MaxConnDuration:    60 * time.Second,
    MaxConnWaitTimeout: 1000 * time.Millisecond,
    MaxConnsPerHost:    128 * 16, // TODO: adjust bottlenecks for best performance with Gitea!
}

// upstreamIndexPages lists pages that may be considered as index pages for directories.
var upstreamIndexPages = []string{
    "index.html",
}

// upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context.
func upstream(ctx *fasthttp.RequestCtx, targetOwner, targetRepo, targetBranch, targetPath, giteaRoot, giteaApiToken string, options *upstreamOptions) (final bool) {
    log := log.With().Strs("upstream", []string{targetOwner, targetRepo, targetBranch, targetPath}).Logger()

    if options.ForbiddenMimeTypes == nil {
        options.ForbiddenMimeTypes = map[string]struct{}{}
    }

    // Check if the branch exists and when it was modified
    if options.BranchTimestamp == (time.Time{}) {
        branch := getBranchTimestamp(targetOwner, targetRepo, targetBranch, giteaRoot, giteaApiToken)

        if branch == nil {
            returnErrorPage(ctx, fasthttp.StatusFailedDependency)
            return true
        }
        targetBranch = branch.branch
        options.BranchTimestamp = branch.timestamp
    }

    if targetOwner == "" || targetRepo == "" || targetBranch == "" {
        returnErrorPage(ctx, fasthttp.StatusBadRequest)
        return true
    }

    // Check if the browser has a cached version
    if ifModifiedSince, err := time.Parse(time.RFC1123, string(ctx.Request.Header.Peek("If-Modified-Since"))); err == nil {
        if !ifModifiedSince.Before(options.BranchTimestamp) {
            ctx.Response.SetStatusCode(fasthttp.StatusNotModified)
            return true
        }
    }
    log.Debug().Msg("preparations")

    // Make a GET request to the upstream URL
    uri := targetOwner + "/" + targetRepo + "/raw/" + targetBranch + "/" + targetPath
    var req *fasthttp.Request
    var res *fasthttp.Response
    var cachedResponse fileResponse
    var err error
    if cachedValue, ok := fileResponseCache.Get(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10)); ok && len(cachedValue.(fileResponse).body) > 0 {
        cachedResponse = cachedValue.(fileResponse)
    } else {
        req = fasthttp.AcquireRequest()
        req.SetRequestURI(giteaRoot + "/api/v1/repos/" + uri + "?access_token=" + giteaApiToken)
        res = fasthttp.AcquireResponse()
        res.SetBodyStream(&strings.Reader{}, -1)
        err = upstreamClient.Do(req, res)
    }
    log.Debug().Msg("acquisition")

    // Handle errors
    if (res == nil && !cachedResponse.exists) || (res != nil && res.StatusCode() == fasthttp.StatusNotFound) {
        if options.TryIndexPages {
            // copy the options struct & try if an index page exists
            optionsForIndexPages := *options
            optionsForIndexPages.TryIndexPages = false
            optionsForIndexPages.AppendTrailingSlash = true
            for _, indexPage := range upstreamIndexPages {
                if upstream(ctx, targetOwner, targetRepo, targetBranch, strings.TrimSuffix(targetPath, "/")+"/"+indexPage, giteaRoot, giteaApiToken, &optionsForIndexPages) {
                    _ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
                        exists: false,
                    }, FileCacheTimeout)
                    return true
                }
            }
            // compatibility fix for GitHub Pages (/example → /example.html)
            optionsForIndexPages.AppendTrailingSlash = false
            optionsForIndexPages.RedirectIfExists = string(ctx.Request.URI().Path()) + ".html"
            if upstream(ctx, targetOwner, targetRepo, targetBranch, targetPath+".html", giteaRoot, giteaApiToken, &optionsForIndexPages) {
                _ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
                    exists: false,
                }, FileCacheTimeout)
                return true
            }
        }
        ctx.Response.SetStatusCode(fasthttp.StatusNotFound)
        if res != nil {
            // Update cache if the request is fresh
            _ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
                exists: false,
            }, FileCacheTimeout)
        }
        return false
    }
    if res != nil && (err != nil || res.StatusCode() != fasthttp.StatusOK) {
        fmt.Printf("Couldn't fetch contents from \"%s\": %s (status code %d)\n", req.RequestURI(), err, res.StatusCode())
        returnErrorPage(ctx, fasthttp.StatusInternalServerError)
        return true
    }

    // Append trailing slash if missing (for index files), and redirect to fix filenames in general
    // options.AppendTrailingSlash is only true when looking for index pages
    if options.AppendTrailingSlash && !bytes.HasSuffix(ctx.Request.URI().Path(), []byte{'/'}) {
        ctx.Redirect(string(ctx.Request.URI().Path())+"/", fasthttp.StatusTemporaryRedirect)
        return true
    }
    if bytes.HasSuffix(ctx.Request.URI().Path(), []byte("/index.html")) {
        ctx.Redirect(strings.TrimSuffix(string(ctx.Request.URI().Path()), "index.html"), fasthttp.StatusTemporaryRedirect)
        return true
    }
    if options.RedirectIfExists != "" {
        ctx.Redirect(options.RedirectIfExists, fasthttp.StatusTemporaryRedirect)
        return true
    }
    log.Debug().Msg("error handling")

    // Set the MIME type
    mimeType := mime.TypeByExtension(path.Ext(targetPath))
    mimeTypeSplit := strings.SplitN(mimeType, ";", 2)
    if _, ok := options.ForbiddenMimeTypes[mimeTypeSplit[0]]; ok || mimeType == "" {
        if options.DefaultMimeType != "" {
            mimeType = options.DefaultMimeType
        } else {
            mimeType = "application/octet-stream"
        }
    }
    ctx.Response.Header.SetContentType(mimeType)

    // Everything's okay so far
    ctx.Response.SetStatusCode(fasthttp.StatusOK)
    ctx.Response.Header.SetLastModified(options.BranchTimestamp)

    log.Debug().Msg("response preparations")

    // Write the response body to the original request
    var cacheBodyWriter bytes.Buffer
    if res != nil {
        if res.Header.ContentLength() > FileCacheSizeLimit {
            err = res.BodyWriteTo(ctx.Response.BodyWriter())
        } else {
            // TODO: cache is half-empty if request is cancelled - does the ctx.Err() below do the trick?
            err = res.BodyWriteTo(io.MultiWriter(ctx.Response.BodyWriter(), &cacheBodyWriter))
        }
    } else {
        _, err = ctx.Write(cachedResponse.body)
    }
    if err != nil {
        fmt.Printf("Couldn't write body for \"%s\": %s\n", req.RequestURI(), err)
        returnErrorPage(ctx, fasthttp.StatusInternalServerError)
        return true
    }
    log.Debug().Msg("response")

    if res != nil && ctx.Err() == nil {
        cachedResponse.exists = true
        cachedResponse.mimeType = mimeType
        cachedResponse.body = cacheBodyWriter.Bytes()
        _ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), cachedResponse, FileCacheTimeout)
    }

    return true
}

// upstreamOptions provides various options for the upstream request.
type upstreamOptions struct {
    DefaultMimeType     string
    ForbiddenMimeTypes  map[string]struct{}
    TryIndexPages       bool
    AppendTrailingSlash bool
    RedirectIfExists    string
    BranchTimestamp     time.Time
}
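Note: the file response cache is keyed on the raw path plus the branch timestamp, so a push to the branch changes the key and stale entries simply age out instead of being invalidated explicitly. Sketch of the key format used throughout upstream():

    // e.g. "someuser/pages/raw/main/index.html?timestamp=1636000000"
    cacheKey := uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10)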
@@ -1,4 +1,4 @@
-package main
+package server

 import (
     "fmt"
@@ -8,6 +8,16 @@ import (
 )

 func TestHandlerPerformance(t *testing.T) {
+    testHandler := Handler(
+        []byte("codeberg.page"),
+        []byte("raw.codeberg.org"),
+        []byte("https://codeberg.org"),
+        "https://docs.codeberg.org/pages/raw-content/",
+        "",
+        [][]byte{[]byte("/.well-known/acme-challenge/")},
+        [][]byte{[]byte("raw.codeberg.org"), []byte("fonts.codeberg.org"), []byte("design.codeberg.org")},
+    )
+
     ctx := &fasthttp.RequestCtx{
         Request:  *fasthttp.AcquireRequest(),
         Response: *fasthttp.AcquireResponse(),
@@ -15,7 +25,7 @@ func TestHandlerPerformance(t *testing.T) {
     ctx.Request.SetRequestURI("http://mondstern.codeberg.page/")
     fmt.Printf("Start: %v\n", time.Now())
     start := time.Now()
-    handler(ctx)
+    testHandler(ctx)
     end := time.Now()
     fmt.Printf("Done: %v\n", time.Now())
     if ctx.Response.StatusCode() != 200 || len(ctx.Response.Body()) < 2048 {
@@ -28,7 +38,7 @@ func TestHandlerPerformance(t *testing.T) {
     ctx.Response.ResetBody()
     fmt.Printf("Start: %v\n", time.Now())
     start = time.Now()
-    handler(ctx)
+    testHandler(ctx)
     end = time.Now()
     fmt.Printf("Done: %v\n", time.Now())
     if ctx.Response.StatusCode() != 200 || len(ctx.Response.Body()) < 2048 {
@@ -42,7 +52,7 @@ func TestHandlerPerformance(t *testing.T) {
     ctx.Request.SetRequestURI("http://example.momar.xyz/")
     fmt.Printf("Start: %v\n", time.Now())
     start = time.Now()
-    handler(ctx)
+    testHandler(ctx)
     end = time.Now()
     fmt.Printf("Done: %v\n", time.Now())
     if ctx.Response.StatusCode() != 200 || len(ctx.Response.Body()) < 1 {
@@ -1,15 +1,17 @@
-package main
+package server

 import (
     "bytes"
     "encoding/gob"
+    "os"
+
     "github.com/akrylysov/pogreb"
 )

 // GetHSTSHeader returns a HSTS header with includeSubdomains & preload for MainDomainSuffix and RawDomain, or an empty
 // string for custom domains.
-func GetHSTSHeader(host []byte) string {
-    if bytes.HasSuffix(host, MainDomainSuffix) || bytes.Equal(host, RawDomain) {
+func GetHSTSHeader(host, mainDomainSuffix, rawDomain []byte) string {
+    if bytes.HasSuffix(host, mainDomainSuffix) || bytes.Equal(host, rawDomain) {
         return "max-age=63072000; includeSubdomains; preload"
     } else {
         return ""
@@ -54,3 +56,12 @@ func PogrebGet(db *pogreb.DB, name []byte, obj interface{}) bool {
     }
     return true
 }
+
+// EnvOr reads an environment variable and returns a default value if it's empty.
+// TODO: to helpers.go or use CLI framework
+func EnvOr(env string, or string) string {
+    if v := os.Getenv(env); v != "" {
+        return v
+    }
+    return or
+}
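Note: GetHSTSHeader now takes the main domain suffix and raw domain as arguments, and the exported EnvOr replaces the old unexported envOr. A short usage sketch (argument values are examples only):

    hsts := GetHSTSHeader([]byte("example.codeberg.page"), []byte(".codeberg.page"), []byte("raw.codeberg.org"))
    // returns "max-age=63072000; includeSubdomains; preload" for *.codeberg.page and the raw domain, "" otherwise
    apiURL := EnvOr("ACME_API", "https://acme-v02.api.letsencrypt.org/directory")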