Lindenii Project Forge
Add descriptive comments to most Go functions
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "context" "github.com/jackc/pgx/v5/pgtype" )
// getRepoInfo returns the filesystem path and direct // access permission for a given repo and a provided ssh public key.
// getRepoInfo returns the filesystem path and direct access permission for a // given repo and a provided ssh public key. // // TODO: Revamp.
func getRepoInfo(ctx context.Context, groupPath []string, repoName, sshPubkey string) (repoID int, fsPath string, access bool, contribReq, userType string, userID int, err error) { err = database.QueryRow(ctx, ` WITH RECURSIVE group_path_cte AS ( -- Start: match the first name in the path where parent_group IS NULL SELECT id, parent_group, name, 1 AS depth FROM groups WHERE name = ($1::text[])[1] AND parent_group IS NULL UNION ALL -- Recurse: join next segment of the path SELECT g.id, g.parent_group, g.name, group_path_cte.depth + 1 FROM groups g JOIN group_path_cte ON g.parent_group = group_path_cte.id WHERE g.name = ($1::text[])[group_path_cte.depth + 1] AND group_path_cte.depth + 1 <= cardinality($1::text[]) ) SELECT r.id, r.filesystem_path, CASE WHEN ugr.user_id IS NOT NULL THEN TRUE ELSE FALSE END AS has_role_in_group, r.contrib_requirements, COALESCE(u.type, ''), COALESCE(u.id, 0) FROM group_path_cte g JOIN repos r ON r.group_id = g.id LEFT JOIN ssh_public_keys s ON s.key_string = $3 LEFT JOIN users u ON u.id = s.user_id LEFT JOIN user_group_roles ugr ON ugr.group_id = g.id AND ugr.user_id = u.id WHERE g.depth = cardinality($1::text[]) AND r.name = $2 `, pgtype.FlatArray[string](groupPath), repoName, sshPubkey, ).Scan(&repoID, &fsPath, &access, &contribReq, &userType, &userID) return }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "html/template" "github.com/dgraph-io/ristretto/v2" "go.lindenii.runxiyu.org/lindenii-common/clog" )
// commitPathFileHTMLCache caches rendered HTML representations of files.
// The key is the commit ID raw hash, followed by the file path.
var commitPathFileHTMLCache *ristretto.Cache[[]byte, template.HTML]

// init constructs the cache; a failure here is unrecoverable, so the
// process exits fatally.
func init() {
	var err error
	commitPathFileHTMLCache, err = ristretto.NewCache(&ristretto.Config[[]byte, template.HTML]{
		NumCounters: 1e4,     // number of keys to track for admission decisions
		MaxCost:     1 << 60, // effectively unbounded total cost
		BufferItems: 8192,
	})
	if err != nil {
		clog.Fatal(1, "Error initializing commitPathFileHTMLCache: "+err.Error())
	}
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "github.com/dgraph-io/ristretto/v2" "go.lindenii.runxiyu.org/lindenii-common/clog" )
// commitPathFileRawCache caches raw string contents of files.
// The key is the commit ID raw hash, followed by the file path.
var commitPathFileRawCache *ristretto.Cache[[]byte, string]

// init constructs the cache; a failure here is unrecoverable, so the
// process exits fatally.
func init() {
	var err error
	commitPathFileRawCache, err = ristretto.NewCache(&ristretto.Config[[]byte, string]{
		NumCounters: 1e4,     // number of keys to track for admission decisions
		MaxCost:     1 << 60, // effectively unbounded total cost
		BufferItems: 8192,
	})
	if err != nil {
		clog.Fatal(1, "Error initializing commitPathFileRawCache: "+err.Error())
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"html/template"

	"github.com/dgraph-io/ristretto/v2"
	"go.lindenii.runxiyu.org/lindenii-common/clog"
)

// treeReadmeCacheEntry is the cached result of rendering one tree level:
// the display entries of the tree plus the rendered README, if any, found
// at that level.
type treeReadmeCacheEntry struct {
	DisplayTree    []displayTreeEntry
	ReadmeFilename string
	ReadmeRendered template.HTML
}
// The key is the commit ID raw hash, optionally followed by a path.
var treeReadmeCache *ristretto.Cache[[]byte, treeReadmeCacheEntry] func init() { var err error treeReadmeCache, err = ristretto.NewCache(&ristretto.Config[[]byte, treeReadmeCacheEntry]{ NumCounters: 1e4, MaxCost: 1 << 60, BufferItems: 8192, }) if err != nil { clog.Fatal(1, "Error initializing indexPageCache: "+err.Error()) } }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "github.com/dgraph-io/ristretto/v2" "go.lindenii.runxiyu.org/lindenii-common/clog" )
// The key is the commit ID raw hash.
var indexCommitsDisplayCache *ristretto.Cache[[]byte, []commitDisplay] func init() { var err error indexCommitsDisplayCache, err = ristretto.NewCache(&ristretto.Config[[]byte, []commitDisplay]{ NumCounters: 1e4, MaxCost: 1 << 60, BufferItems: 8192, }) if err != nil { clog.Fatal(1, "Error initializing indexCommitsCache: "+err.Error()) } }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "bufio" "context" "errors" "os" "github.com/jackc/pgx/v5/pgxpool" "go.lindenii.runxiyu.org/lindenii-common/scfg" )
// database is the pgx connection pool assigned by loadConfig.
//
// NOTE(review): another `var database *pgxpool.Pool` declaration (with its
// own doc comment) appears in the database source file of this same
// package; a package may declare the variable only once. Confirm which
// declaration is canonical and remove the other.
var database *pgxpool.Pool
// config holds the global configuration used by this instance. There is
// currently no synchronization mechanism, so it must not be modified after
// request handlers are spawned.
var config struct {
	// HTTP configures the web listener and cookie/timeouts behavior.
	HTTP struct {
		Net          string `scfg:"net"`
		Addr         string `scfg:"addr"`
		CookieExpiry int    `scfg:"cookie_expiry"`
		Root         string `scfg:"root"`
		ReadTimeout  uint32 `scfg:"read_timeout"`
		WriteTimeout uint32 `scfg:"write_timeout"`
		IdleTimeout  uint32 `scfg:"idle_timeout"`
		ReverseProxy bool   `scfg:"reverse_proxy"`
	} `scfg:"http"`
	// Hooks configures the hook-handler UNIX socket and the directory
	// the hook executables are deployed to.
	Hooks struct {
		Socket string `scfg:"socket"`
		Execs  string `scfg:"execs"`
	} `scfg:"hooks"`
	// Git configures where bare repositories are stored.
	Git struct {
		RepoDir string `scfg:"repo_dir"`
	} `scfg:"git"`
	// SSH configures the SSH listener.
	SSH struct {
		Net  string `scfg:"net"`
		Addr string `scfg:"addr"`
		Key  string `scfg:"key"`
		Root string `scfg:"root"`
	} `scfg:"ssh"`
	// IRC configures the notification bot connection.
	IRC struct {
		Net   string `scfg:"net"`
		Addr  string `scfg:"addr"`
		TLS   bool   `scfg:"tls"`
		SendQ uint   `scfg:"sendq"`
		Nick  string `scfg:"nick"`
		User  string `scfg:"user"`
		Gecos string `scfg:"gecos"`
	} `scfg:"irc"`
	// General holds instance-wide presentation settings.
	General struct {
		Title string `scfg:"title"`
	} `scfg:"general"`
	// DB selects the database backend; only "postgres" is accepted.
	DB struct {
		Type string `scfg:"type"`
		Conn string `scfg:"conn"`
	} `scfg:"db"`
}
// loadConfig loads a configuration file from the specified path and unmarshals // it to the global [config] struct. This may race with concurrent reads from // [config]; additional synchronization is necessary if the configuration is to // be made reloadable. // // TODO: Currently, it returns an error when the user specifies any unknown // configuration patterns, but silently ignores fields in the [config] struct // that is not present in the user's configuration file. We would prefer the // exact opposite behavior.
func loadConfig(path string) (err error) { var configFile *os.File
var decoder *scfg.Decoder
if configFile, err = os.Open(path); err != nil { return err } defer configFile.Close()
decoder = scfg.NewDecoder(bufio.NewReader(configFile))
decoder := scfg.NewDecoder(bufio.NewReader(configFile))
if err = decoder.Decode(&config); err != nil { return err } if config.DB.Type != "postgres" { return errors.New("unsupported database type") } if database, err = pgxpool.New(context.Background(), config.DB.Conn); err != nil { return err } globalData["forge_title"] = config.General.Title return nil }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "context" "github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
)
// database serves as the primary database handle for this entire application.
// Transactions or single reads may be used therefrom. A [pgxpool.Pool] is
// necessary to safely use pgx concurrently; pgx.Conn, etc. are insufficient.
//
// TODO: All database handling logic in all request handlers must be revamped.
// We must ensure that each request has all logic in one transaction (subject
// to exceptions if appropriate) so they get a consistent view of the database
// at a single point. A failure to do so may cause things as serious as
// privilege escalation.
var database *pgxpool.Pool
// queryNameDesc is a helper function that executes a query and returns a
// list of name_desc_t results.
// list of nameDesc results. The query must return two string arguments, i.e. a // name and a description.
func queryNameDesc(ctx context.Context, query string, args ...any) (result []nameDesc, err error) { var rows pgx.Rows if rows, err = database.Query(ctx, query, args...); err != nil { return nil, err } defer rows.Close() for rows.Next() { var name, description string if err = rows.Scan(&name, &description); err != nil { return nil, err } result = append(result, nameDesc{name, description}) } return result, rows.Err() } // nameDesc holds a name and a description. type nameDesc struct { Name string Description string }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "bufio" "context" "errors" "io" "net/http" "net/url" "strings" "github.com/jackc/pgx/v5" )
// fedauth checks whether a user's SSH public key matches the remote username // they claim to have on the service. If so, the association is recorded.
func fedauth(ctx context.Context, userID int, service, remoteUsername, pubkey string) (bool, error) { var err error matched := false usernameEscaped := url.PathEscape(remoteUsername) var req *http.Request switch service {
// TODO: Services should be configurable by the instance administrator // and should not be hardcoded in the source code.
case "sr.ht": req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://meta.sr.ht/~"+usernameEscaped+".keys", nil) case "github": req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://github.com/"+usernameEscaped+".keys", nil) case "codeberg": req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://codeberg.org/"+usernameEscaped+".keys", nil) case "tangled": req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://tangled.sh/keys/"+usernameEscaped, nil) // TODO: Don't rely on one webview default: return false, errors.New("unknown federated service") } if err != nil { return false, err } resp, err := http.DefaultClient.Do(req) if err != nil { return false, err } defer func() { _ = resp.Body.Close() }() buf := bufio.NewReader(resp.Body) for { line, err := buf.ReadString('\n') if errors.Is(err, io.EOF) { break } else if err != nil { return false, err } lineSplit := strings.Split(line, " ") if len(lineSplit) < 2 { continue } line = strings.Join(lineSplit[:2], " ") if line == pubkey { matched = true break } } if !matched { return false, nil } var txn pgx.Tx if txn, err = database.Begin(ctx); err != nil { return false, err } defer func() { _ = txn.Rollback(ctx) }() if _, err = txn.Exec(ctx, `UPDATE users SET type = 'federated' WHERE id = $1 AND type = 'pubkey_only'`, userID); err != nil { return false, err } if _, err = txn.Exec(ctx, `INSERT INTO federated_identities (user_id, service, remote_username) VALUES ($1, $2, $3)`, userID, service, remoteUsername); err != nil { return false, err } if err = txn.Commit(ctx); err != nil { return false, err } return true, nil }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "bytes" "fmt" "strings" "time" "github.com/go-git/go-git/v5/plumbing/object" )
// get_patch_from_commit formats a commit object as if it was returned by
// fmtCommitPatch formats a commit object as if it was returned by
// git-format-patch. func fmtCommitPatch(commit *object.Commit) (final string, err error) { var patch *object.Patch var buf bytes.Buffer var author object.Signature var date string var commitTitle, commitDetails string
if _, patch, err = fmtCommitAsPatch(commit); err != nil {
if _, patch, err = commitToPatch(commit); err != nil {
return "", err } author = commit.Author date = author.When.Format(time.RFC1123Z) commitTitle, commitDetails, _ = strings.Cut(commit.Message, "\n") // This date is hardcoded in Git. fmt.Fprintf(&buf, "From %s Mon Sep 17 00:00:00 2001\n", commit.Hash) fmt.Fprintf(&buf, "From: %s <%s>\n", author.Name, author.Email) fmt.Fprintf(&buf, "Date: %s\n", date) fmt.Fprintf(&buf, "Subject: [PATCH] %s\n\n", commitTitle) if commitDetails != "" { commitDetails1, commitDetails2, _ := strings.Cut(commitDetails, "\n") if strings.TrimSpace(commitDetails1) == "" { commitDetails = commitDetails2 } buf.WriteString(commitDetails) buf.WriteString("\n") } buf.WriteString("---\n") fmt.Fprint(&buf, patch.Stats().String()) fmt.Fprintln(&buf) buf.WriteString(patch.String()) fmt.Fprintf(&buf, "\n-- \n2.48.1\n") return buf.String(), nil }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "errors" "io" "io/fs" "os" "path/filepath" )
// deployHooks deploys the git hooks client to the filesystem. // The git hooks client is expected to be embedded in resources_fs and must be // pre-compiled during the build process; see the Makefile.
// deployHooks deploys the git hooks client to the filesystem. The git hooks // client is expected to be embedded in resourcesFS and must be pre-compiled // during the build process; see the Makefile.
func deployHooks() (err error) { err = func() (err error) { var srcFD fs.File var dstFD *os.File if srcFD, err = resourcesFS.Open("hookc/hookc"); err != nil { return err } defer srcFD.Close() if dstFD, err = os.OpenFile(filepath.Join(config.Hooks.Execs, "hookc"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755); err != nil { return err } defer dstFD.Close() if _, err = io.Copy(dstFD, srcFD); err != nil { return err } return nil }() if err != nil { return err } // Go's embed filesystems do not store permissions; but in any case, // they would need to be 0o755: if err = os.Chmod(filepath.Join(config.Hooks.Execs, "hookc"), 0o755); err != nil { return err } for _, hookName := range []string{ "pre-receive", } { if err = os.Symlink(filepath.Join(config.Hooks.Execs, "hookc"), filepath.Join(config.Hooks.Execs, hookName)); err != nil { if !errors.Is(err, fs.ErrExist) { return err } // TODO: Maybe check if it points to the right place? } } return nil }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> // //go:build linux package main import ( "bytes" "context" "encoding/binary" "errors" "fmt" "io" "net" "os" "path/filepath" "strconv" "strings" "syscall" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/jackc/pgx/v5" "go.lindenii.runxiyu.org/lindenii-common/ansiec" "go.lindenii.runxiyu.org/lindenii-common/clog" ) var ( errGetFD = errors.New("unable to get file descriptor") errGetUcred = errors.New("failed getsockopt") ) // hooksHandler handles a connection from hookc via the // unix socket. func hooksHandler(conn net.Conn) { var ctx context.Context var cancel context.CancelFunc var ucred *syscall.Ucred var err error var cookie []byte var packPass packPass var sshStderr io.Writer var hookRet byte defer conn.Close() ctx, cancel = context.WithCancel(context.Background()) defer cancel() // There aren't reasonable cases where someone would run this as // another user. 
if ucred, err = getUcred(conn); err != nil { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nUnable to get peer credentials: %v", err) return } uint32uid := uint32(os.Getuid()) //#nosec G115 if ucred.Uid != uint32uid { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nUID mismatch") return } cookie = make([]byte, 64) if _, err = conn.Read(cookie); err != nil { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nFailed to read cookie: %v", err) return } { var ok bool packPass, ok = packPasses.Load(bytesToString(cookie)) if !ok { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nInvalid handler cookie") return } } sshStderr = packPass.session.Stderr() _, _ = sshStderr.Write([]byte{'\n'}) hookRet = func() byte { var argc64 uint64 if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil { writeRedError(sshStderr, "Failed to read argc: %v", err) return 1 } var args []string for range argc64 { var arg bytes.Buffer for { nextByte := make([]byte, 1) n, err := conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read arg: %v", err) return 1 } if nextByte[0] == 0 { break } arg.WriteByte(nextByte[0]) } args = append(args, arg.String()) } gitEnv := make(map[string]string) for { var envLine bytes.Buffer for { nextByte := make([]byte, 1) n, err := conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read environment variable: %v", err) return 1 } if nextByte[0] == 0 { break } envLine.WriteByte(nextByte[0]) } if envLine.Len() == 0 { break } kv := envLine.String() parts := strings.SplitN(kv, "=", 2) if len(parts) < 2 { writeRedError(sshStderr, "Invalid environment variable line: %v", kv) return 1 } gitEnv[parts[0]] = parts[1] } var stdin bytes.Buffer if _, err = io.Copy(&stdin, conn); err != nil { writeRedError(conn, "Failed to read to the stdin buffer: %v", err) } switch filepath.Base(args[0]) 
{ case "pre-receive": if packPass.directAccess { return 0 } allOK := true for { var line, oldOID, rest, newIOID, refName string var found bool var oldHash, newHash plumbing.Hash var oldCommit, newCommit *object.Commit var pushOptCount int pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"]) if err != nil { writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err) return 1 } // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface? // Also it'd be nice to be able to combine users or whatever if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" { if pushOptCount == 0 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu") return 1 } for pushOptIndex := range pushOptCount { pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)] if !ok { writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex) return 1 } if strings.HasPrefix(pushOpt, "fedid=") { fedUserID := strings.TrimPrefix(pushOpt, "fedid=") service, username, found := strings.Cut(fedUserID, ":") if !found { writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID) return 1 } ok, err := fedauth(ctx, packPass.userID, service, username, packPass.pubkey) if err != nil { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err) return 1 } if !ok { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID) return 1 } break } if pushOptIndex == pushOptCount-1 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. 
You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu") return 1 } } } line, err = stdin.ReadString('\n') if errors.Is(err, io.EOF) { break } else if err != nil { writeRedError(sshStderr, "Failed to read pre-receive line: %v", err) return 1 } line = line[:len(line)-1] oldOID, rest, found = strings.Cut(line, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } newIOID, refName, found = strings.Cut(rest, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } if strings.HasPrefix(refName, "refs/heads/contrib/") { if allZero(oldOID) { // New branch fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) var newMRLocalID int if packPass.userID != 0 { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id", packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } else { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id", packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } if err != nil { writeRedError(sshStderr, "Error creating merge request: %v", err) return 1 } mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID) fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset) select { case ircSendBuffered <- "PRIVMSG #chat :New merge request at " + mergeRequestWebURL: default: clog.Error("IRC SendQ exceeded") } } else { // Existing contrib branch var existingMRUser int var isAncestor bool err = database.QueryRow(ctx, "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2", strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID, ).Scan(&existingMRUser) 
if err != nil { if errors.Is(err, pgx.ErrNoRows) { writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err) } else { writeRedError(sshStderr, "Error querying for existing merge request: %v", err) } return 1 } if existingMRUser == 0 { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)") continue } if existingMRUser != packPass.userID { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)") continue } oldHash = plumbing.NewHash(oldOID) if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil { writeRedError(sshStderr, "Daemon failed to get old commit: %v", err) return 1 } // Potential BUG: I'm not sure if new_commit is guaranteed to be // detectable as they haven't been merged into the main repo's // objects yet. But it seems to work, and I don't think there's // any reason for this to only work intermitently. newHash = plumbing.NewHash(newIOID) if newCommit, err = packPass.repo.CommitObject(newHash); err != nil { writeRedError(sshStderr, "Daemon failed to get new commit: %v", err) return 1 } if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil { writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err) return 1 } if !isAncestor { // TODO: Create MR snapshot ref instead allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)") continue } fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) } } else { // Non-contrib branch allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)") } } fmt.Fprintln(sshStderr) if allOK { fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)") return 0 } fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)") 
return 1 default: fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset) return 1 } }() fmt.Fprintln(sshStderr) _, _ = conn.Write([]byte{hookRet}) }
// serveGitHooks handles connections on the specified network listener and
// treats incoming connections as those from git hook handlers by spawning
// sessions. The listener must be a SOCK_STREAM UNIX domain socket. The
// function itself blocks.
func serveGitHooks(listener net.Listener) error {
	for {
		conn, err := listener.Accept()
		if err != nil {
			return err
		}
		// Each connection is handled concurrently; hooksHandler is
		// responsible for closing conn.
		go hooksHandler(conn)
	}
}
// getUcred fetches connection credentials as a [syscall.Ucred] from a given
// [net.Conn]. It panics when conn is not a [net.UnixConn].
func getUcred(conn net.Conn) (ucred *syscall.Ucred, err error) {
	unixConn := conn.(*net.UnixConn)
	var unixConnFD *os.File

	// File() returns a duplicate descriptor; close it once we have
	// queried the socket option.
	if unixConnFD, err = unixConn.File(); err != nil {
		return nil, errGetFD
	}
	defer unixConnFD.Close()

	// SO_PEERCRED yields the PID/UID/GID of the peer process on Linux.
	if ucred, err = syscall.GetsockoptUcred(int(unixConnFD.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED); err != nil {
		return nil, errGetUcred
	}
	return ucred, nil
}
// allZero reports whether every character in the given string is '0'; it is
// vacuously true for the empty string. The comparison is not constant time
// and must not be used in contexts where time-based side channel attacks are
// a concern.
func allZero(s string) bool {
	// Stripping every '0' from both ends leaves an empty string exactly
	// when the string consists solely of '0' characters.
	return strings.Trim(s, "0") == ""
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> // //go:build !linux package main import ( "bytes" "context" "encoding/binary" "errors" "fmt" "io" "net" "path/filepath" "strconv" "strings" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/jackc/pgx/v5" "go.lindenii.runxiyu.org/lindenii-common/ansiec" "go.lindenii.runxiyu.org/lindenii-common/clog" ) var errGetFD = errors.New("unable to get file descriptor") // hooksHandler handles a connection from hookc via the // unix socket. func hooksHandler(conn net.Conn) { var ctx context.Context var cancel context.CancelFunc var err error var cookie []byte var packPass packPass var sshStderr io.Writer var hookRet byte defer conn.Close() ctx, cancel = context.WithCancel(context.Background()) defer cancel() // TODO: Validate that the connection is from the right user. cookie = make([]byte, 64) if _, err = conn.Read(cookie); err != nil { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nFailed to read cookie: %v", err) return } { var ok bool packPass, ok = packPasses.Load(bytesToString(cookie)) if !ok { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nInvalid handler cookie") return } } sshStderr = packPass.session.Stderr() _, _ = sshStderr.Write([]byte{'\n'}) hookRet = func() byte { var argc64 uint64 if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil { writeRedError(sshStderr, "Failed to read argc: %v", err) return 1 } var args []string for range argc64 { var arg bytes.Buffer for { nextByte := make([]byte, 1) n, err := conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read arg: %v", err) return 1 } if nextByte[0] == 0 { break } arg.WriteByte(nextByte[0]) } args = append(args, arg.String()) } gitEnv := make(map[string]string) for { var envLine bytes.Buffer for { nextByte := make([]byte, 1) n, err := 
conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read environment variable: %v", err) return 1 } if nextByte[0] == 0 { break } envLine.WriteByte(nextByte[0]) } if envLine.Len() == 0 { break } kv := envLine.String() parts := strings.SplitN(kv, "=", 2) if len(parts) < 2 { writeRedError(sshStderr, "Invalid environment variable line: %v", kv) return 1 } gitEnv[parts[0]] = parts[1] } var stdin bytes.Buffer if _, err = io.Copy(&stdin, conn); err != nil { writeRedError(conn, "Failed to read to the stdin buffer: %v", err) } switch filepath.Base(args[0]) { case "pre-receive": if packPass.directAccess { return 0 } allOK := true for { var line, oldOID, rest, newIOID, refName string var found bool var oldHash, newHash plumbing.Hash var oldCommit, newCommit *object.Commit var pushOptCount int pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"]) if err != nil { writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err) return 1 } // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface? // Also it'd be nice to be able to combine users or whatever if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" { if pushOptCount == 0 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") return 1 } for pushOptIndex := range pushOptCount { pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)] if !ok { writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex) return 1 } if strings.HasPrefix(pushOpt, "fedid=") { fedUserID := strings.TrimPrefix(pushOpt, "fedid=") service, username, found := strings.Cut(fedUserID, ":") if !found { writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID) return 1 } ok, err := fedauth(ctx, packPass.userID, service, username, packPass.pubkey) if err != nil { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err) return 1 } if !ok { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID) return 1 } break } if pushOptIndex == pushOptCount-1 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") return 1 } } } line, err = stdin.ReadString('\n') if errors.Is(err, io.EOF) { break } else if err != nil { writeRedError(sshStderr, "Failed to read pre-receive line: %v", err) return 1 } line = line[:len(line)-1] oldOID, rest, found = strings.Cut(line, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } newIOID, refName, found = strings.Cut(rest, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } if strings.HasPrefix(refName, "refs/heads/contrib/") { if allZero(oldOID) { // New branch fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) var newMRLocalID int if packPass.userID != 0 { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id", packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } else { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id", packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } if err != nil { writeRedError(sshStderr, "Error creating merge request: %v", err) return 1 } mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID) fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset) select { case ircSendBuffered <- "PRIVMSG #chat :New merge request at " + mergeRequestWebURL: default: clog.Error("IRC SendQ exceeded") } } else { // Existing contrib branch var existingMRUser int var isAncestor bool err = database.QueryRow(ctx, "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2", strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID, ).Scan(&existingMRUser) if err != nil { if errors.Is(err, pgx.ErrNoRows) { 
writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err) } else { writeRedError(sshStderr, "Error querying for existing merge request: %v", err) } return 1 } if existingMRUser == 0 { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)") continue } if existingMRUser != packPass.userID { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)") continue } oldHash = plumbing.NewHash(oldOID) if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil { writeRedError(sshStderr, "Daemon failed to get old commit: %v", err) return 1 } // Potential BUG: I'm not sure if new_commit is guaranteed to be // detectable as they haven't been merged into the main repo's // objects yet. But it seems to work, and I don't think there's // any reason for this to only work intermitently. newHash = plumbing.NewHash(newIOID) if newCommit, err = packPass.repo.CommitObject(newHash); err != nil { writeRedError(sshStderr, "Daemon failed to get new commit: %v", err) return 1 } if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil { writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err) return 1 } if !isAncestor { // TODO: Create MR snapshot ref instead allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)") continue } fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) } } else { // Non-contrib branch allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)") } } fmt.Fprintln(sshStderr) if allOK { fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)") return 0 } fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)") return 1 default: fmt.Fprintln(sshStderr, 
ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset) return 1 } }() fmt.Fprintln(sshStderr) _, _ = conn.Write([]byte{hookRet}) }
// serveGitHooks handles connections on the specified network listener and // treats incoming connections as those from git hook handlers by spawning // sessions. The listener must be a SOCK_STREAM UNIX domain socket. The // function itself blocks.
func serveGitHooks(listener net.Listener) error { for { conn, err := listener.Accept() if err != nil { return err } go hooksHandler(conn) } }
// allZero returns true if all runes in a given string are '0'. The comparison
// is not constant time and must not be used in contexts where time-based side
// channel attacks are a concern.
func allZero(s string) bool {
	// Byte-wise iteration is equivalent to rune-wise iteration here:
	// every byte of a multi-byte UTF-8 rune is >= 0x80 and therefore can
	// never equal '0'.
	for i := 0; i < len(s); i++ {
		if s[i] != '0' {
			return false
		}
	}
	return true
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "github.com/go-git/go-git/v5" gitConfig "github.com/go-git/go-git/v5/config" gitFmtConfig "github.com/go-git/go-git/v5/plumbing/format/config" )
// gitInit initializes a bare git repository with the // forge-deployed hooks directory as the hooksPath.
// gitInit initializes a bare git repository with the forge-deployed hooks // directory as the hooksPath.
func gitInit(repoPath string) (err error) { var repo *git.Repository var gitConf *gitConfig.Config if repo, err = git.PlainInit(repoPath, true); err != nil { return err } if gitConf, err = repo.Config(); err != nil { return err } gitConf.Raw.SetOption("core", gitFmtConfig.NoSubsection, "hooksPath", config.Hooks.Execs) gitConf.Raw.SetOption("receive", gitFmtConfig.NoSubsection, "advertisePushOptions", "true") if err = repo.SetConfig(gitConf); err != nil { return err } return nil }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "context" "errors" "io" "iter" "os" "strings" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/jackc/pgx/v5/pgtype" ) // openRepo opens a git repository by group and repo name.
// // TODO: This should be deprecated in favor of doing it in the relevant // request/router context in the future, as it cannot cover the nuance of // fields needed.
func openRepo(ctx context.Context, groupPath []string, repoName string) (repo *git.Repository, description string, repoID int, err error) { var fsPath string err = database.QueryRow(ctx, ` WITH RECURSIVE group_path_cte AS ( -- Start: match the first name in the path where parent_group IS NULL SELECT id, parent_group, name, 1 AS depth FROM groups WHERE name = ($1::text[])[1] AND parent_group IS NULL UNION ALL -- Recurse: join next segment of the path SELECT g.id, g.parent_group, g.name, group_path_cte.depth + 1 FROM groups g JOIN group_path_cte ON g.parent_group = group_path_cte.id WHERE g.name = ($1::text[])[group_path_cte.depth + 1] AND group_path_cte.depth + 1 <= cardinality($1::text[]) ) SELECT r.filesystem_path, COALESCE(r.description, ''), r.id FROM group_path_cte g JOIN repos r ON r.group_id = g.id WHERE g.depth = cardinality($1::text[]) AND r.name = $2 `, pgtype.FlatArray[string](groupPath), repoName).Scan(&fsPath, &description, &repoID) if err != nil { return } repo, err = git.PlainOpen(fsPath) return } // go-git's tree entries are not friendly for use in HTML templates.
// displayTreeEntry is a wrapper around a go-git tree entry that is friendlier
// for use in templating.
type displayTreeEntry struct {
	Name      string // entry name with any leading "/" stripped
	Mode      string // human-readable file mode; "x---------" when unknown
	Size      int64  // blob size in bytes; 0 when unavailable
	IsFile    bool   // whether the entry is a regular file
	IsSubtree bool   // NOTE(review): not populated by makeDisplayTree — confirm at call sites
}
// makeDisplayTree takes git trees of form [object.Tree] and creates a slice of // [displayTreeEntry] for easier templating.
func makeDisplayTree(tree *object.Tree) (displayTree []displayTreeEntry) { for _, entry := range tree.Entries { displayEntry := displayTreeEntry{} //exhaustruct:ignore var err error var osMode os.FileMode if osMode, err = entry.Mode.ToOSFileMode(); err != nil { displayEntry.Mode = "x---------" } else { displayEntry.Mode = osMode.String() } displayEntry.IsFile = entry.Mode.IsFile() if displayEntry.Size, err = tree.Size(entry.Name); err != nil { displayEntry.Size = 0 } displayEntry.Name = strings.TrimPrefix(entry.Name, "/") displayTree = append(displayTree, displayEntry) } return displayTree }
// commitIterSeqErr creates an [iter.Seq[*object.Commit]] from an // [object.CommitIter], and additionally returns a pointer to error. // The pointer to error is guaranteed to be populated with either nil or the // error returned by the commit iterator after the returned iterator is // finished.
func commitIterSeqErr(commitIter object.CommitIter) (iter.Seq[*object.Commit], *error) { var err error return func(yield func(*object.Commit) bool) { for { commit, err2 := commitIter.Next() if err2 != nil { if errors.Is(err2, io.EOF) { return } err = err2 return } if !yield(commit) { return } } }, &err }
func iterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] { return func(yield func(T) bool) { var iterations uint for v := range s { if iterations > n-1 { return } if !yield(v) { return } iterations++ } } }
// getRecentCommits fetches numCommits commits, starting from the headHash in a // repo.
func getRecentCommits(repo *git.Repository, headHash plumbing.Hash, numCommits int) (recentCommits []*object.Commit, err error) { var commitIter object.CommitIter var thisCommit *object.Commit commitIter, err = repo.Log(&git.LogOptions{From: headHash}) //exhaustruct:ignore if err != nil { return nil, err } recentCommits = make([]*object.Commit, 0) defer commitIter.Close() if numCommits < 0 { for { thisCommit, err = commitIter.Next() if errors.Is(err, io.EOF) { return recentCommits, nil } else if err != nil { return nil, err } recentCommits = append(recentCommits, thisCommit) } } else { for range numCommits { thisCommit, err = commitIter.Next() if errors.Is(err, io.EOF) { return recentCommits, nil } else if err != nil { return nil, err } recentCommits = append(recentCommits, thisCommit) } } return recentCommits, err }
// getRecentCommitsDisplay generates a slice of [commitDisplay] friendly for // use in HTML templates, consisting of numCommits commits from headhash in the // repo.
func getRecentCommitsDisplay(repo *git.Repository, headHash plumbing.Hash, numCommits int) (recentCommits []commitDisplay, err error) { var commitIter object.CommitIter var thisCommit *object.Commit commitIter, err = repo.Log(&git.LogOptions{From: headHash}) //exhaustruct:ignore if err != nil { return nil, err } recentCommits = make([]commitDisplay, 0) defer commitIter.Close() if numCommits < 0 { for { thisCommit, err = commitIter.Next() if errors.Is(err, io.EOF) { return recentCommits, nil } else if err != nil { return nil, err } recentCommits = append(recentCommits, commitDisplay{ thisCommit.Hash, thisCommit.Author, thisCommit.Committer, thisCommit.Message, thisCommit.TreeHash, }) } } else { for range numCommits { thisCommit, err = commitIter.Next() if errors.Is(err, io.EOF) { return recentCommits, nil } else if err != nil { return nil, err } recentCommits = append(recentCommits, commitDisplay{ thisCommit.Hash, thisCommit.Author, thisCommit.Committer, thisCommit.Message, thisCommit.TreeHash, }) } } return recentCommits, err } type commitDisplay struct { Hash plumbing.Hash Author object.Signature Committer object.Signature Message string TreeHash plumbing.Hash }
func fmtCommitAsPatch(commit *object.Commit) (parentCommitHash plumbing.Hash, patch *object.Patch, err error) {
// commitToPatch creates an [object.Patch] from the first parent of a given // [object.Commit]. // // TODO: This function should be deprecated as it only diffs with the first // parent and does not correctly handle merge commits. func commitToPatch(commit *object.Commit) (parentCommitHash plumbing.Hash, patch *object.Patch, err error) {
var parentCommit *object.Commit var commitTree *object.Tree parentCommit, err = commit.Parent(0) switch { case errors.Is(err, object.ErrParentNotFound): if commitTree, err = commit.Tree(); err != nil { return } if patch, err = nullTree.Patch(commitTree); err != nil { return } case err != nil: return default: parentCommitHash = parentCommit.Hash if patch, err = parentCommit.Patch(commit); err != nil { return } } return } var nullTree object.Tree
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" )
// getUserFromRequest returns the user ID and username associated with the // session cookie in a given [http.Request].
func getUserFromRequest(request *http.Request) (id int, username string, err error) { var sessionCookie *http.Cookie if sessionCookie, err = request.Cookie("session"); err != nil { return } err = database.QueryRow( request.Context(), "SELECT user_id, COALESCE(username, '') FROM users u JOIN sessions s ON u.id = s.user_id WHERE s.session_id = $1;", sessionCookie.Value, ).Scan(&id, &username) return }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main
// globalData is passed as "global" when rendering HTML templates and contains
// global data that should stay constant throughout an execution of Lindenii
// Forge as no synchronization mechanism is provided for updating it.
var globalData = map[string]any{
	"server_public_key_string":      &serverPubkeyString,
	"server_public_key_fingerprint": &serverPubkeyFP,
	"forge_version":                 VERSION,
	// Some other ones are populated after config parsing
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" "strings" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/storer" )
// httpHandleRepoBranches provides the branches page in repos.
func httpHandleRepoBranches(writer http.ResponseWriter, _ *http.Request, params map[string]any) { var repo *git.Repository var repoName string var groupPath []string var err error var notes []string var branches []string var branchesIter storer.ReferenceIter repo, repoName, groupPath = params["repo"].(*git.Repository), params["repo_name"].(string), params["group_path"].([]string) if strings.Contains(repoName, "\n") || sliceContainsNewlines(groupPath) { notes = append(notes, "Path contains newlines; HTTP Git access impossible") } branchesIter, err = repo.Branches() if err == nil { _ = branchesIter.ForEach(func(branch *plumbing.Reference) error { branches = append(branches, branch.Name().Short()) return nil }) } params["branches"] = branches params["http_clone_url"] = genHTTPRemoteURL(groupPath, repoName) params["ssh_clone_url"] = genSSHRemoteURL(groupPath, repoName) params["notes"] = notes renderTemplate(writer, "repo_branches", params) }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" "runtime" )
// httpHandleGC handles an HTTP request by calling the garbage collector and
// redirecting the user back to the home page.
//
// TODO: This should probably be removed or hidden behind an administrator's
// control panel, in the future.
func httpHandleGC(writer http.ResponseWriter, request *http.Request, _ map[string]any) {
	// runtime.GC blocks until the collection completes, so the redirect
	// is only sent afterwards.
	runtime.GC()
	http.Redirect(writer, request, "/", http.StatusSeeOther)
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "errors" "net/http" "path/filepath" "strconv" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgtype" )
// httpHandleGroupIndex provides index pages for groups, which includes a list
// of its subgroups and repos, as well as a form for group maintainers to
// create repos.
func httpHandleGroupIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var groupPath []string
	var repos []nameDesc
	var subgroups []nameDesc
	var err error
	var groupID int
	var groupDesc string

	groupPath = params["group_path"].([]string)

	// The group itself: resolve the full group path to the group's ID and
	// description by walking the group hierarchy from the root.
	err = database.QueryRow(request.Context(), `
		WITH RECURSIVE group_path_cte AS (
			SELECT
				id,
				parent_group,
				name,
				1 AS depth
			FROM groups
			WHERE name = ($1::text[])[1]
				AND parent_group IS NULL

			UNION ALL

			SELECT
				g.id,
				g.parent_group,
				g.name,
				group_path_cte.depth + 1
			FROM groups g
			JOIN group_path_cte ON g.parent_group = group_path_cte.id
			WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
				AND group_path_cte.depth + 1 <= cardinality($1::text[])
		)
		SELECT c.id, COALESCE(g.description, '')
		FROM group_path_cte c
		JOIN groups g ON g.id = c.id
		WHERE c.depth = cardinality($1::text[])
	`,
		pgtype.FlatArray[string](groupPath),
	).Scan(&groupID, &groupDesc)
	if errors.Is(err, pgx.ErrNoRows) {
		errorPage404(writer, params)
		return
	} else if err != nil {
		errorPage500(writer, params, "Error getting group: "+err.Error())
		return
	}

	// ACL: a user has direct access to the group iff they hold any role
	// in it.
	var count int
	err = database.QueryRow(request.Context(), `
		SELECT COUNT(*)
		FROM user_group_roles
		WHERE user_id = $1
			AND group_id = $2
	`, params["user_id"].(int), groupID).Scan(&count)
	if err != nil {
		errorPage500(writer, params, "Error checking access: "+err.Error())
		return
	}
	directAccess := (count > 0)

	if request.Method == http.MethodPost {
		// Repo creation form submission; only users with direct
		// access may create repos in the group.
		if !directAccess {
			errorPage403(writer, params, "You do not have direct access to this group")
			return
		}

		repoName := request.FormValue("repo_name")
		repoDesc := request.FormValue("repo_desc")
		contribReq := request.FormValue("repo_contrib")
		if repoName == "" {
			errorPage400(writer, params, "Repo name is required")
			return
		}

		var newRepoID int
		err := database.QueryRow(
			request.Context(),
			`INSERT INTO repos (name, description, group_id, contrib_requirements)
	VALUES ($1, $2, $3, $4)
	RETURNING id`,
			repoName,
			repoDesc,
			groupID,
			contribReq,
		).Scan(&newRepoID)
		if err != nil {
			errorPage500(writer, params, "Error creating repo: "+err.Error())
			return
		}

		// The filesystem path is derived from the repo ID, so it can
		// only be stored after the row has been inserted.
		filePath := filepath.Join(config.Git.RepoDir, strconv.Itoa(newRepoID)+".git")
		_, err = database.Exec(
			request.Context(),
			`UPDATE repos
	SET filesystem_path = $1
	WHERE id = $2`,
			filePath,
			newRepoID,
		)
		if err != nil {
			errorPage500(writer, params, "Error updating repo path: "+err.Error())
			return
		}

		if err = gitInit(filePath); err != nil {
			errorPage500(writer, params, "Error initializing repo: "+err.Error())
			return
		}

		redirectUnconditionally(writer, request)
		return
	}

	// Repos
	var rows pgx.Rows
	rows, err = database.Query(request.Context(), `
		SELECT name, COALESCE(description, '')
		FROM repos
		WHERE group_id = $1
	`, groupID)
	if err != nil {
		errorPage500(writer, params, "Error getting repos: "+err.Error())
		return
	}
	defer rows.Close()
	for rows.Next() {
		var name, description string
		if err = rows.Scan(&name, &description); err != nil {
			errorPage500(writer, params, "Error getting repos: "+err.Error())
			return
		}
		repos = append(repos, nameDesc{name, description})
	}
	if err = rows.Err(); err != nil {
		errorPage500(writer, params, "Error getting repos: "+err.Error())
		return
	}

	// Subgroups
	rows, err = database.Query(request.Context(), `
		SELECT name, COALESCE(description, '')
		FROM groups
		WHERE parent_group = $1
	`, groupID)
	if err != nil {
		errorPage500(writer, params, "Error getting subgroups: "+err.Error())
		return
	}
	defer rows.Close()
	for rows.Next() {
		var name, description string
		if err = rows.Scan(&name, &description); err != nil {
			errorPage500(writer, params, "Error getting subgroups: "+err.Error())
			return
		}
		subgroups = append(subgroups, nameDesc{name, description})
	}
	if err = rows.Err(); err != nil {
		errorPage500(writer, params, "Error getting subgroups: "+err.Error())
		return
	}

	params["repos"] = repos
	params["subgroups"] = subgroups
	params["description"] = groupDesc
	params["direct_access"] = directAccess

	renderTemplate(writer, "group", params)
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" "runtime" "github.com/dustin/go-humanize" )
// httpHandleIndex provides the main index page which includes a list of groups // and some global information such as SSH keys.
func httpHandleIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) { var err error var groups []nameDesc groups, err = queryNameDesc(request.Context(), "SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL") if err != nil { errorPage500(writer, params, "Error querying groups: "+err.Error()) return } params["groups"] = groups // Memory currently allocated memstats := runtime.MemStats{} //exhaustruct:ignore runtime.ReadMemStats(&memstats) params["mem"] = humanize.IBytes(memstats.Alloc) renderTemplate(writer, "index", params) }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "crypto/rand" "encoding/base64" "errors" "fmt" "net/http" "time" "github.com/alexedwards/argon2id" "github.com/jackc/pgx/v5" )
// httpHandleLogin provides the login page for local users.
func httpHandleLogin(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var username, password string
	var userID int
	var passwordHash string
	var err error
	var passwordMatches bool
	var cookieValue string
	var now time.Time
	var expiry time.Time
	var cookie http.Cookie

	// Anything other than a POST just renders the login form.
	if request.Method != http.MethodPost {
		renderTemplate(writer, "login", params)
		return
	}

	username = request.PostFormValue("username")
	password = request.PostFormValue("password")

	err = database.QueryRow(request.Context(),
		"SELECT id, COALESCE(password, '') FROM users WHERE username = $1",
		username,
	).Scan(&userID, &passwordHash)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			params["login_error"] = "Unknown username"
			renderTemplate(writer, "login", params)
			return
		}
		errorPage500(writer, params, "Error querying user information: "+err.Error())
		return
	}
	if passwordHash == "" {
		// Users without a password (e.g. externally-managed accounts)
		// cannot log in via this form.
		params["login_error"] = "User has no password"
		renderTemplate(writer, "login", params)
		return
	}

	if passwordMatches, err = argon2id.ComparePasswordAndHash(password, passwordHash); err != nil {
		errorPage500(writer, params, "Error comparing password and hash: "+err.Error())
		return
	}

	if !passwordMatches {
		params["login_error"] = "Invalid password"
		renderTemplate(writer, "login", params)
		return
	}

	// Successful login: mint a random session token and store it both as
	// a cookie and in the sessions table.
	if cookieValue, err = randomUrlsafeStr(16); err != nil {
		errorPage500(writer, params, "Error getting random string: "+err.Error())
		return
	}

	now = time.Now()
	expiry = now.Add(time.Duration(config.HTTP.CookieExpiry) * time.Second)

	cookie = http.Cookie{
		Name:     "session",
		Value:    cookieValue,
		SameSite: http.SameSiteLaxMode,
		HttpOnly: true,
		Secure:   false, // TODO: presumably should be true when serving over HTTPS — confirm
		Expires:  expiry,
		Path:     "/",
	} //exhaustruct:ignore

	http.SetCookie(writer, &cookie)

	_, err = database.Exec(request.Context(), "INSERT INTO sessions (user_id, session_id) VALUES ($1, $2)", userID, cookieValue)
	if err != nil {
		errorPage500(writer, params, "Error inserting session: "+err.Error())
		return
	}

	http.Redirect(writer, request, "/", http.StatusSeeOther)
}

// randomUrlsafeStr generates a random string of the given entropic size
// using the URL-safe base64 encoding. The actual size of the string returned
// will be 4*sz.
func randomUrlsafeStr(sz int) (string, error) {
	r := make([]byte, 3*sz)
	_, err := rand.Read(r)
	if err != nil {
		return "", fmt.Errorf("error generating random string: %w", err)
	}
	return base64.RawURLEncoding.EncodeToString(r), nil
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "fmt" "net/http" "strings" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/filemode" "github.com/go-git/go-git/v5/plumbing/format/diff" "github.com/go-git/go-git/v5/plumbing/object" "go.lindenii.runxiyu.org/lindenii-common/misc" )
// usableFilePatch is a [diff.FilePatch] that is structured in a way more
// friendly for use in HTML templates.
type usableFilePatch struct {
	From   diff.File
	To     diff.File
	Chunks []usableChunk
}
// usableChunk is a [diff.Chunk] that is structured in a way more friendly for // use in HTML templates.
type usableChunk struct { Operation diff.Operation Content string } func httpHandleRepoCommit(writer http.ResponseWriter, request *http.Request, params map[string]any) { var repo *git.Repository var commitIDStrSpec, commitIDStrSpecNoSuffix string var commitID plumbing.Hash var parentCommitHash plumbing.Hash var commitObj *object.Commit var commitIDStr string var err error var patch *object.Patch repo, commitIDStrSpec = params["repo"].(*git.Repository), params["commit_id"].(string) commitIDStrSpecNoSuffix = strings.TrimSuffix(commitIDStrSpec, ".patch") commitID = plumbing.NewHash(commitIDStrSpecNoSuffix) if commitObj, err = repo.CommitObject(commitID); err != nil { errorPage500(writer, params, "Error getting commit object: "+err.Error()) return } if commitIDStrSpecNoSuffix != commitIDStrSpec { var patchStr string if patchStr, err = fmtCommitPatch(commitObj); err != nil { errorPage500(writer, params, "Error formatting patch: "+err.Error()) return } fmt.Fprintln(writer, patchStr) return } commitIDStr = commitObj.Hash.String() if commitIDStr != commitIDStrSpec { http.Redirect(writer, request, commitIDStr, http.StatusSeeOther) return } params["commit_object"] = commitObj params["commit_id"] = commitIDStr
parentCommitHash, patch, err = fmtCommitAsPatch(commitObj)
parentCommitHash, patch, err = commitToPatch(commitObj)
if err != nil { errorPage500(writer, params, "Error getting patch from commit: "+err.Error()) return } params["parent_commitHash"] = parentCommitHash.String() params["patch"] = patch params["file_patches"] = makeUsableFilePatches(patch) renderTemplate(writer, "repo_commit", params) } type fakeDiffFile struct { hash plumbing.Hash mode filemode.FileMode path string } func (f fakeDiffFile) Hash() plumbing.Hash { return f.hash } func (f fakeDiffFile) Mode() filemode.FileMode { return f.mode } func (f fakeDiffFile) Path() string { return f.path } var nullFakeDiffFile = fakeDiffFile{ hash: plumbing.NewHash("0000000000000000000000000000000000000000"), mode: misc.FirstOrPanic(filemode.New("100644")), path: "", } func makeUsableFilePatches(patch diff.Patch) (usableFilePatches []usableFilePatch) { // TODO: Remove unnecessary context // TODO: Prepend "+"/"-"/" " instead of solely distinguishing based on color for _, filePatch := range patch.FilePatches() { var fromFile, toFile diff.File var ufp usableFilePatch chunks := []usableChunk{} fromFile, toFile = filePatch.Files() if fromFile == nil { fromFile = nullFakeDiffFile } if toFile == nil { toFile = nullFakeDiffFile } for _, chunk := range filePatch.Chunks() { var content string content = chunk.Content() if len(content) > 0 && content[0] == '\n' { content = "\n" + content } // Horrible hack to fix how browsers newlines that immediately proceed <pre> chunks = append(chunks, usableChunk{ Operation: chunk.Type(), Content: content, }) } ufp = usableFilePatch{ Chunks: chunks, From: fromFile, To: toFile, } usableFilePatches = append(usableFilePatches, ufp) } return }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" "github.com/jackc/pgx/v5" )
// idTitleStatus describes properties of a merge request that needs to be
// present in MR listings.
type idTitleStatus struct {
	ID     int    // repo-local merge request ID (repo_local_id)
	Title  string // MR title; "Untitled" is substituted when NULL in the database
	Status string
}
// httpHandleRepoContribIndex provides an index to merge requests of a repo.
func httpHandleRepoContribIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) { var rows pgx.Rows var result []idTitleStatus var err error if rows, err = database.Query(request.Context(), "SELECT repo_local_id, COALESCE(title, 'Untitled'), status FROM merge_requests WHERE repo_id = $1", params["repo_id"], ); err != nil { errorPage500(writer, params, "Error querying merge requests: "+err.Error()) return } defer rows.Close() for rows.Next() { var mrID int var mrTitle, mrStatus string if err = rows.Scan(&mrID, &mrTitle, &mrStatus); err != nil { errorPage500(writer, params, "Error scanning merge request: "+err.Error()) return } result = append(result, idTitleStatus{mrID, mrTitle, mrStatus}) } if err = rows.Err(); err != nil { errorPage500(writer, params, "Error ranging over merge requests: "+err.Error()) return } params["merge_requests"] = result renderTemplate(writer, "repo_contrib_index", params) }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" "strconv" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" )
// httpHandleRepoContribOne provides an interface to each merge request of a
// repo.
func httpHandleRepoContribOne(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var mrIDStr string
	var mrIDInt int
	var err error
	var title, status, srcRefStr, dstBranchStr string
	var repo *git.Repository
	var srcRefHash plumbing.Hash
	var dstBranchHash plumbing.Hash
	var srcCommit, dstCommit, mergeBaseCommit *object.Commit
	var mergeBases []*object.Commit

	mrIDStr = params["mr_id"].(string)
	mrIDInt64, err := strconv.ParseInt(mrIDStr, 10, strconv.IntSize)
	if err != nil {
		errorPage400(writer, params, "Merge request ID not an integer")
		return
	}
	mrIDInt = int(mrIDInt64)

	if err = database.QueryRow(request.Context(),
		"SELECT COALESCE(title, ''), status, source_ref, COALESCE(destination_branch, '') FROM merge_requests WHERE repo_id = $1 AND repo_local_id = $2",
		params["repo_id"], mrIDInt,
	).Scan(&title, &status, &srcRefStr, &dstBranchStr); err != nil {
		errorPage500(writer, params, "Error querying merge request: "+err.Error())
		return
	}

	repo = params["repo"].(*git.Repository)

	if srcRefHash, err = getRefHash(repo, "branch", srcRefStr); err != nil {
		errorPage500(writer, params, "Error getting source ref hash: "+err.Error())
		return
	}
	if srcCommit, err = repo.CommitObject(srcRefHash); err != nil {
		errorPage500(writer, params, "Error getting source commit: "+err.Error())
		return
	}
	params["source_commit"] = srcCommit

	// An empty destination branch means the MR targets the repo's default
	// branch (HEAD).
	if dstBranchStr == "" {
		dstBranchStr = "HEAD"
		dstBranchHash, err = getRefHash(repo, "", "")
	} else {
		dstBranchHash, err = getRefHash(repo, "branch", dstBranchStr)
	}
	if err != nil {
		errorPage500(writer, params, "Error getting destination branch hash: "+err.Error())
		return
	}

	if dstCommit, err = repo.CommitObject(dstBranchHash); err != nil {
		errorPage500(writer, params, "Error getting destination commit: "+err.Error())
		return
	}
	params["destination_commit"] = dstCommit

	if mergeBases, err = srcCommit.MergeBase(dstCommit); err != nil {
		errorPage500(writer, params, "Error getting merge base: "+err.Error())
		return
	}
	if len(mergeBases) < 1 {
		errorPage500(writer, params, "No merge base found for this merge request; these two branches do not share any common history") // TODO
		return
	}
	// Multiple merge bases may exist; only the first one is used here.
	mergeBaseCommit = mergeBases[0]
	params["merge_base"] = mergeBaseCommit

	// The displayed diff is merge base → source, i.e. the changes the MR
	// would introduce.
	patch, err := mergeBaseCommit.Patch(srcCommit)
	if err != nil {
		errorPage500(writer, params, "Error getting patch: "+err.Error())
		return
	}
	params["file_patches"] = makeUsableFilePatches(patch)

	params["mr_title"], params["mr_status"], params["mr_source_ref"], params["mr_destination_branch"] = title, status, srcRefStr, dstBranchStr

	renderTemplate(writer, "repo_contrib_one", params)
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" "strings" "time" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/plumbing/storer" )
// httpHandleRepoIndex provides the front page of a repo.
func httpHandleRepoIndex(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
	var repo *git.Repository
	var repoName string
	var groupPath []string
	var refHash plumbing.Hash
	var refHashSlice []byte
	var err error
	var commitObj *object.Commit
	var tree *object.Tree
	var notes []string
	var branches []string
	var branchesIter storer.ReferenceIter
	var commits []commitDisplay

	repo, repoName, groupPath = params["repo"].(*git.Repository), params["repo_name"].(string), params["group_path"].([]string)

	if strings.Contains(repoName, "\n") || sliceContainsNewlines(groupPath) {
		notes = append(notes, "Path contains newlines; HTTP Git access impossible")
	}

	refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string))
	if err != nil {
		// Without a resolvable ref there are no commits or files to
		// show; still render the page with clone URLs and notes.
		goto no_ref
	}
	refHashSlice = refHash[:]

	branchesIter, err = repo.Branches()
	if err == nil {
		_ = branchesIter.ForEach(func(branch *plumbing.Reference) error {
			branches = append(branches, branch.Name().Short())
			return nil
		})
	}
	params["branches"] = branches

	// Recent commits are cached by ref hash. A cached nil value records a
	// previous failure, in which case we skip straight to the readme.
	if value, found := indexCommitsDisplayCache.Get(refHashSlice); found {
		if value != nil {
			commits = value
		} else {
			goto readme
		}
	} else {
		start := time.Now()
		commits, err = getRecentCommitsDisplay(repo, refHash, 5)
		if err != nil {
			commits = nil
		}
		cost := time.Since(start).Nanoseconds()
		indexCommitsDisplayCache.Set(refHashSlice, commits, cost)
		if err != nil {
			goto readme
		}
	}
	params["commits"] = commits

readme:
	// The file listing and the rendered readme are cached together, also
	// keyed by ref hash.
	if value, found := treeReadmeCache.Get(refHashSlice); found {
		params["files"] = value.DisplayTree
		params["readme_filename"] = value.ReadmeFilename
		params["readme"] = value.ReadmeRendered
	} else {
		start := time.Now()
		if commitObj, err = repo.CommitObject(refHash); err != nil {
			goto no_ref
		}
		if tree, err = commitObj.Tree(); err != nil {
			goto no_ref
		}
		displayTree := makeDisplayTree(tree)
		readmeFilename, readmeRendered := renderReadmeAtTree(tree)
		cost := time.Since(start).Nanoseconds()
		params["files"] = displayTree
		params["readme_filename"] = readmeFilename
		params["readme"] = readmeRendered
		entry := treeReadmeCacheEntry{
			DisplayTree:    displayTree,
			ReadmeFilename: readmeFilename,
			ReadmeRendered: readmeRendered,
		}
		treeReadmeCache.Set(refHashSlice, entry, cost)
	}

no_ref:
	params["http_clone_url"] = genHTTPRemoteURL(groupPath, repoName)
	params["ssh_clone_url"] = genSSHRemoteURL(groupPath, repoName)
	params["notes"] = notes

	renderTemplate(writer, "repo_index", params)
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "fmt" "io" "net/http" "os/exec" "github.com/jackc/pgx/v5/pgtype" )
// httpHandleRepoInfo provides advertised refs of a repo for use in Git's Smart // HTTP protocol. // // TODO: Reject access from web browsers.
func httpHandleRepoInfo(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) { groupPath := params["group_path"].([]string) repoName := params["repo_name"].(string) var repoPath string if err := database.QueryRow(request.Context(), ` WITH RECURSIVE group_path_cte AS ( -- Start: match the first name in the path where parent_group IS NULL SELECT id, parent_group, name, 1 AS depth FROM groups WHERE name = ($1::text[])[1] AND parent_group IS NULL UNION ALL -- Recurse: jion next segment of the path SELECT g.id, g.parent_group, g.name, group_path_cte.depth + 1 FROM groups g JOIN group_path_cte ON g.parent_group = group_path_cte.id WHERE g.name = ($1::text[])[group_path_cte.depth + 1] AND group_path_cte.depth + 1 <= cardinality($1::text[]) ) SELECT r.filesystem_path FROM group_path_cte c JOIN repos r ON r.group_id = c.id WHERE c.depth = cardinality($1::text[]) AND r.name = $2 `, pgtype.FlatArray[string](groupPath), repoName, ).Scan(&repoPath); err != nil { return err } writer.Header().Set("Content-Type", "application/x-git-upload-pack-advertisement") writer.WriteHeader(http.StatusOK) cmd := exec.Command("git", "upload-pack", "--stateless-rpc", "--advertise-refs", repoPath) stdout, err := cmd.StdoutPipe() if err != nil { return err } defer func() { _ = stdout.Close() }() cmd.Stderr = cmd.Stdout if err = cmd.Start(); err != nil { return err } if err = packLine(writer, "# service=git-upload-pack\n"); err != nil { return err } if err = packFlush(writer); err != nil { return } if _, err = io.Copy(writer, stdout); err != nil { return err } if err = cmd.Wait(); err != nil { return err } return nil } // Taken from https://github.com/icyphox/legit, MIT license. func packLine(w io.Writer, s string) error { _, err := fmt.Fprintf(w, "%04x%s", len(s)+4, s) return err } // Taken from https://github.com/icyphox/legit, MIT license. func packFlush(w io.Writer) error { _, err := fmt.Fprint(w, "0000") return err }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" )
// TODO: I probably shouldn't include *all* commits here...
// httpHandleRepoLog provides a page with a complete Git log. // // TODO: This currently provides all commits in the branch. It should be // paginated and cached instead.
func httpHandleRepoLog(writer http.ResponseWriter, _ *http.Request, params map[string]any) { var repo *git.Repository var refHash plumbing.Hash var err error repo = params["repo"].(*git.Repository) if refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string)); err != nil { errorPage500(writer, params, "Error getting ref hash: "+err.Error()) return } logOptions := git.LogOptions{From: refHash} //exhaustruct:ignore commitIter, err := repo.Log(&logOptions) if err != nil { errorPage500(writer, params, "Error getting recent commits: "+err.Error()) return } params["commits"], params["commits_err"] = commitIterSeqErr(commitIter) renderTemplate(writer, "repo_log", params) }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "fmt" "net/http" "strings" "time" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" )
// httpHandleRepoRaw serves raw files, or directory listings that point to raw // files.
func httpHandleRepoRaw(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var rawPathSpec, pathSpec string
	var repo *git.Repository
	var refHash plumbing.Hash
	var refHashSlice []byte
	var commitObj *object.Commit
	var tree *object.Tree
	var err error

	// pathSpec is the in-repo path with any trailing slash stripped; the
	// raw form is kept to distinguish file vs. directory requests.
	rawPathSpec = params["rest"].(string)
	repo, pathSpec = params["repo"].(*git.Repository), strings.TrimSuffix(rawPathSpec, "/")
	params["path_spec"] = pathSpec

	if refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string)); err != nil {
		errorPage500(writer, params, "Error getting ref hash: "+err.Error())
		return
	}
	refHashSlice = refHash[:]

	// Cache key: ref hash bytes followed by the path. Both the directory
	// cache and the raw-file cache are keyed the same way.
	cacheHandle := append(refHashSlice, stringToBytes(pathSpec)...) //nolint:gocritic

	// Fast path: serve from the directory-listing or raw-file caches.
	if value, found := treeReadmeCache.Get(cacheHandle); found {
		params["files"] = value.DisplayTree
		renderTemplate(writer, "repo_raw_dir", params)
		return
	}
	if value, found := commitPathFileRawCache.Get(cacheHandle); found {
		fmt.Fprint(writer, value)
		return
	}

	if commitObj, err = repo.CommitObject(refHash); err != nil {
		errorPage500(writer, params, "Error getting commit object: "+err.Error())
		return
	}
	if tree, err = commitObj.Tree(); err != nil {
		errorPage500(writer, params, "Error getting file tree: "+err.Error())
		return
	}

	// start is used to compute the ristretto cache cost (time to build).
	start := time.Now()
	var target *object.Tree
	if pathSpec == "" {
		target = tree
	} else {
		if target, err = tree.Tree(pathSpec); err != nil {
			// Not a subtree; try treating the path as a file blob.
			var file *object.File
			var fileContent string
			if file, err = tree.File(pathSpec); err != nil {
				errorPage500(writer, params, "Error retrieving path: "+err.Error())
				return
			}
			// Files must not be requested with a trailing slash.
			if redirectNoDir(writer, request) {
				return
			}
			if fileContent, err = file.Contents(); err != nil {
				errorPage500(writer, params, "Error reading file: "+err.Error())
				return
			}
			cost := time.Since(start).Nanoseconds()
			commitPathFileRawCache.Set(cacheHandle, fileContent, cost)
			fmt.Fprint(writer, fileContent)
			return
		}
	}

	// Directories must be requested with a trailing slash.
	if redirectDir(writer, request) {
		return
	}

	displayTree := makeDisplayTree(target)
	readmeFilename, readmeRendered := renderReadmeAtTree(target)
	cost := time.Since(start).Nanoseconds()

	params["files"] = displayTree
	params["readme_filename"] = readmeFilename
	params["readme"] = readmeRendered

	treeReadmeCache.Set(cacheHandle, treeReadmeCacheEntry{
		DisplayTree:    displayTree,
		ReadmeFilename: readmeFilename,
		ReadmeRendered: readmeRendered,
	}, cost)

	renderTemplate(writer, "repo_raw_dir", params)
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "bytes" "html/template" "net/http" "path" "strings" "time" "github.com/alecthomas/chroma/v2" chromaHTML "github.com/alecthomas/chroma/v2/formatters/html" chromaLexers "github.com/alecthomas/chroma/v2/lexers" chromaStyles "github.com/alecthomas/chroma/v2/styles" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" )
// httpHandleRepoTree provides a friendly, syntax-highlighted view of // individual files, and provides directory views that link to these files. // // TODO: Do not highlight files that are too large.
func httpHandleRepoTree(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var rawPathSpec, pathSpec string
	var repo *git.Repository
	var refHash plumbing.Hash
	var refHashSlice []byte
	var commitObject *object.Commit
	var tree *object.Tree
	var err error

	// pathSpec is the in-repo path with any trailing slash stripped; the
	// raw form is kept to decide redirects for files vs. directories.
	rawPathSpec = params["rest"].(string)
	repo, pathSpec = params["repo"].(*git.Repository), strings.TrimSuffix(rawPathSpec, "/")
	params["path_spec"] = pathSpec

	if refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string)); err != nil {
		errorPage500(writer, params, "Error getting ref hash: "+err.Error())
		return
	}
	refHashSlice = refHash[:]

	// Cache key: ref hash bytes followed by the path; shared by the
	// directory-listing cache and the highlighted-file cache.
	cacheHandle := append(refHashSlice, stringToBytes(pathSpec)...) //nolint:gocritic

	// Fast paths: serve cached directory listings or highlighted files.
	if value, found := treeReadmeCache.Get(cacheHandle); found {
		params["files"] = value.DisplayTree
		params["readme_filename"] = value.ReadmeFilename
		params["readme"] = value.ReadmeRendered
		renderTemplate(writer, "repo_tree_dir", params)
		return
	}
	if value, found := commitPathFileHTMLCache.Get(cacheHandle); found {
		params["file_contents"] = value
		renderTemplate(writer, "repo_tree_file", params)
		return
	}

	// start is used to compute the ristretto cache cost (time to build).
	start := time.Now()
	var target *object.Tree
	if pathSpec == "" {
		// Repo root: render the top-level tree with its README.
		if commitObject, err = repo.CommitObject(refHash); err != nil {
			errorPage500(writer, params, "Error getting commit object: "+err.Error())
			return
		}
		if tree, err = commitObject.Tree(); err != nil {
			errorPage500(writer, params, "Error getting file tree: "+err.Error())
			return
		}

		displayTree := makeDisplayTree(tree)
		readmeFilename, readmeRendered := renderReadmeAtTree(tree)
		cost := time.Since(start).Nanoseconds()

		params["files"] = displayTree
		params["readme_filename"] = readmeFilename
		params["readme"] = readmeRendered

		entry := treeReadmeCacheEntry{
			DisplayTree:    displayTree,
			ReadmeFilename: readmeFilename,
			ReadmeRendered: readmeRendered,
		}
		treeReadmeCache.Set(cacheHandle, entry, cost)

		renderTemplate(writer, "repo_tree_dir", params)
		return
	}

	if commitObject, err = repo.CommitObject(refHash); err != nil {
		errorPage500(writer, params, "Error getting commit object: "+err.Error())
		return
	}
	if tree, err = commitObject.Tree(); err != nil {
		errorPage500(writer, params, "Error getting file tree: "+err.Error())
		return
	}

	if target, err = tree.Tree(pathSpec); err != nil {
		// Not a subtree; treat the path as a file and render it with
		// chroma syntax highlighting.
		var file *object.File
		var fileContent string
		var lexer chroma.Lexer
		var iterator chroma.Iterator
		var style *chroma.Style
		var formatter *chromaHTML.Formatter
		var formattedHTML template.HTML

		if file, err = tree.File(pathSpec); err != nil {
			errorPage500(writer, params, "Error retrieving path: "+err.Error())
			return
		}
		// Files must not be requested with a trailing slash.
		if redirectNoDir(writer, request) {
			return
		}
		if fileContent, err = file.Contents(); err != nil {
			errorPage500(writer, params, "Error reading file: "+err.Error())
			return
		}

		// Pick a lexer by filename; fall back to plain text.
		lexer = chromaLexers.Match(pathSpec)
		if lexer == nil {
			lexer = chromaLexers.Fallback
		}
		if iterator, err = lexer.Tokenise(nil, fileContent); err != nil {
			errorPage500(writer, params, "Error tokenizing code: "+err.Error())
			return
		}
		var formattedHTMLStr bytes.Buffer
		style = chromaStyles.Get("autumn")
		formatter = chromaHTML.New(chromaHTML.WithClasses(true), chromaHTML.TabWidth(8))
		if err = formatter.Format(&formattedHTMLStr, style, iterator); err != nil {
			errorPage500(writer, params, "Error formatting code: "+err.Error())
			return
		}
		formattedHTML = template.HTML(formattedHTMLStr.Bytes()) //#nosec G203

		cost := time.Since(start).Nanoseconds()
		commitPathFileHTMLCache.Set(cacheHandle, formattedHTML, cost)

		params["file_contents"] = formattedHTML
		renderTemplate(writer, "repo_tree_file", params)
		return
	}

	// A subtree requested without a trailing slash: redirect so relative
	// links inside the listing resolve correctly.
	if len(rawPathSpec) != 0 && rawPathSpec[len(rawPathSpec)-1] != '/' {
		http.Redirect(writer, request, path.Base(pathSpec)+"/", http.StatusSeeOther)
		return
	}

	displayTree := makeDisplayTree(target)
	readmeFilename, readmeRendered := renderReadmeAtTree(target)
	cost := time.Since(start).Nanoseconds()

	entry := treeReadmeCacheEntry{
		DisplayTree:    displayTree,
		ReadmeFilename: readmeFilename,
		ReadmeRendered: readmeRendered,
	}
	treeReadmeCache.Set(cacheHandle, entry, cost)

	params["readme_filename"], params["readme"] = readmeFilename, readmeRendered
	params["files"] = displayTree
	renderTemplate(writer, "repo_tree_dir", params)
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "io" "net/http" "os" "os/exec" "github.com/jackc/pgx/v5/pgtype" )
// httpHandleUploadPack handles incoming Git fetch/pull/clone's over the Smart // HTTP protocol.
func httpHandleUploadPack(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
	var groupPath []string
	var repoName string
	var repoPath string
	var stdout io.ReadCloser
	var stdin io.WriteCloser
	var cmd *exec.Cmd

	groupPath, repoName = params["group_path"].([]string), params["repo_name"].(string)

	// Resolve the repo's filesystem path by walking the nested group
	// hierarchy with a recursive CTE; the whole group path must match.
	if err := database.QueryRow(request.Context(), `
	WITH RECURSIVE group_path_cte AS (
		-- Start: match the first name in the path where parent_group IS NULL
		SELECT
			id,
			parent_group,
			name,
			1 AS depth
		FROM groups
		WHERE name = ($1::text[])[1]
			AND parent_group IS NULL

		UNION ALL

		-- Recurse: jion next segment of the path
		SELECT
			g.id,
			g.parent_group,
			g.name,
			group_path_cte.depth + 1
		FROM groups g
		JOIN group_path_cte ON g.parent_group = group_path_cte.id
		WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
			AND group_path_cte.depth + 1 <= cardinality($1::text[])
	)
	SELECT r.filesystem_path
	FROM group_path_cte c
	JOIN repos r ON r.group_id = c.id
	WHERE c.depth = cardinality($1::text[])
		AND r.name = $2
	`,
		pgtype.FlatArray[string](groupPath), repoName,
	).Scan(&repoPath); err != nil {
		return err
	}

	writer.Header().Set("Content-Type", "application/x-git-upload-pack-result")
	writer.Header().Set("Connection", "Keep-Alive")
	writer.Header().Set("Transfer-Encoding", "chunked")
	writer.WriteHeader(http.StatusOK)

	// Proxy the stateless-rpc exchange: request body -> git's stdin,
	// git's stdout -> response body.
	cmd = exec.Command("git", "upload-pack", "--stateless-rpc", repoPath)
	cmd.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+config.Hooks.Socket)
	if stdout, err = cmd.StdoutPipe(); err != nil {
		return err
	}
	cmd.Stderr = cmd.Stdout
	defer func() {
		_ = stdout.Close()
	}()

	if stdin, err = cmd.StdinPipe(); err != nil {
		return err
	}
	defer func() {
		_ = stdin.Close()
	}()

	if err = cmd.Start(); err != nil {
		return err
	}

	if _, err = io.Copy(stdin, request.Body); err != nil {
		return err
	}

	// Close stdin explicitly so git sees EOF and produces its response;
	// the deferred Close then becomes a no-op.
	if err = stdin.Close(); err != nil {
		return err
	}

	if _, err = io.Copy(writer, stdout); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		return err
	}

	return nil
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/http" )
// httpHandleUsers is a useless stub.
func httpHandleUsers(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
	// User listing is not implemented yet; always respond with 501.
	errorPage501(writer, params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"errors"
	"net/http"
	"strconv"
	"strings"

	"github.com/jackc/pgx/v5"
	"go.lindenii.runxiyu.org/lindenii-common/clog"
)

// forgeHTTPRouter is an http.Handler; its ServeHTTP method is the single
// entry point that routes all incoming HTTP requests for the forge.
type forgeHTTPRouter struct{}
// ServeHTTP handles all incoming HTTP requests and routes them to the correct // location. // // TODO: This function is way too large.
func (router *forgeHTTPRouter) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
	// Determine the client address, trusting X-Forwarded-For only when
	// reverse-proxy mode is configured.
	var remoteAddr string
	if config.HTTP.ReverseProxy {
		remoteAddrs, ok := request.Header["X-Forwarded-For"]
		if ok && len(remoteAddrs) == 1 {
			remoteAddr = remoteAddrs[0]
		} else {
			remoteAddr = request.RemoteAddr
		}
	} else {
		remoteAddr = request.RemoteAddr
	}
	clog.Info("Incoming HTTP: " + remoteAddr + " " + request.Method + " " + request.RequestURI)

	var segments []string
	var err error
	var sepIndex int
	params := make(map[string]any)

	if segments, _, err = parseReqURI(request.RequestURI); err != nil {
		errorPage400(writer, params, "Error parsing request URI: "+err.Error())
		return
	}
	// A trailing slash yields an empty final segment; strip it and record
	// whether the request was for a "directory".
	dirMode := false
	if segments[len(segments)-1] == "" {
		dirMode = true
		segments = segments[:len(segments)-1]
	}

	params["url_segments"] = segments
	params["dir_mode"] = dirMode
	params["global"] = globalData
	var userID int // 0 for none
	userID, params["username"], err = getUserFromRequest(request)
	params["user_id"] = userID
	// Missing cookie / unknown session are normal for anonymous visitors.
	if err != nil && !errors.Is(err, http.ErrNoCookie) && !errors.Is(err, pgx.ErrNoRows) {
		errorPage500(writer, params, "Error getting user info from request: "+err.Error())
		return
	}

	if userID == 0 {
		params["user_id_string"] = ""
	} else {
		params["user_id_string"] = strconv.Itoa(userID)
	}

	if len(segments) == 0 {
		httpHandleIndex(writer, request, params)
		return
	}

	// Paths starting with ":" are reserved, non-group endpoints. This
	// first dispatch handles the file-serving ones.
	if segments[0] == ":" {
		if len(segments) < 2 {
			errorPage404(writer, params)
			return
		} else if len(segments) == 2 && redirectDir(writer, request) {
			return
		}

		switch segments[1] {
		case "man":
			manHandler.ServeHTTP(writer, request)
			return
		case "static":
			staticHandler.ServeHTTP(writer, request)
			return
		case "source":
			sourceHandler.ServeHTTP(writer, request)
			return
		}
	}

	// Remaining reserved endpoints; anything else under ":" is a 404.
	if segments[0] == ":" {
		switch segments[1] {
		case "login":
			httpHandleLogin(writer, request, params)
			return
		case "users":
			httpHandleUsers(writer, request, params)
			return
		case "gc":
			httpHandleGC(writer, request, params)
			return
		default:
			errorPage404(writer, params)
			return
		}
	}

	// Everything else is a group path, optionally followed by a ":"
	// separator and a module (e.g. /group/subgroup/:/repos/name/...).
	sepIndex = -1
	for i, part := range segments {
		if part == ":" {
			sepIndex = i
			break
		}
	}

	params["separator_index"] = sepIndex

	var groupPath []string
	var moduleType string
	var moduleName string

	if sepIndex > 0 {
		groupPath = segments[:sepIndex]
	} else {
		groupPath = segments
	}
	params["group_path"] = groupPath

	switch {
	case sepIndex == -1:
		// No separator: plain group index page.
		if redirectDir(writer, request) {
			return
		}
		httpHandleGroupIndex(writer, request, params)
	case len(segments) == sepIndex+1:
		// Separator with no module type.
		errorPage404(writer, params)
		return
	case len(segments) == sepIndex+2:
		// Module type with no module name.
		errorPage404(writer, params)
		return
	default:
		moduleType = segments[sepIndex+1]
		moduleName = segments[sepIndex+2]
		switch moduleType {
		case "repos":
			params["repo_name"] = moduleName

			// Smart HTTP endpoints are handled before any ref or
			// repo-open work, as they do their own DB lookups.
			if len(segments) > sepIndex+3 {
				switch segments[sepIndex+3] {
				case "info":
					if err = httpHandleRepoInfo(writer, request, params); err != nil {
						errorPage500(writer, params, err.Error())
					}
					return
				case "git-upload-pack":
					if err = httpHandleUploadPack(writer, request, params); err != nil {
						errorPage500(writer, params, err.Error())
					}
					return
				}
			}

			// An absent refspec is fine; the handlers fall back to
			// the default branch.
			if params["ref_type"], params["ref_name"], err = getParamRefTypeName(request); err != nil {
				if errors.Is(err, errNoRefSpec) {
					params["ref_type"] = ""
				} else {
					errorPage500(writer, params, "Error querying ref type: "+err.Error())
					return
				}
			}

			// TODO: subgroups

			if params["repo"], params["repo_description"], params["repo_id"], err = openRepo(request.Context(), groupPath, moduleName); err != nil {
				errorPage500(writer, params, "Error opening repo: "+err.Error())
				return
			}

			if len(segments) == sepIndex+3 {
				if redirectDir(writer, request) {
					return
				}
				httpHandleRepoIndex(writer, request, params)
				return
			}

			repoFeature := segments[sepIndex+3]
			switch repoFeature {
			case "tree":
				if anyContain(segments[sepIndex+4:], "/") {
					errorPage400(writer, params, "Repo tree paths may not contain slashes in any segments")
					return
				}
				// Preserve trailing-slash information in "rest" so the
				// handler can distinguish files from directories.
				if dirMode {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
				} else {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/")
				}
				if len(segments) < sepIndex+5 && redirectDir(writer, request) {
					return
				}
				httpHandleRepoTree(writer, request, params)
			case "branches":
				if redirectDir(writer, request) {
					return
				}
				httpHandleRepoBranches(writer, request, params)
				return
			case "raw":
				if anyContain(segments[sepIndex+4:], "/") {
					errorPage400(writer, params, "Repo tree paths may not contain slashes in any segments")
					return
				}
				if dirMode {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
				} else {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/")
				}
				if len(segments) < sepIndex+5 && redirectDir(writer, request) {
					return
				}
				httpHandleRepoRaw(writer, request, params)
			case "log":
				if len(segments) > sepIndex+4 {
					errorPage400(writer, params, "Too many parameters")
					return
				}
				if redirectDir(writer, request) {
					return
				}
				httpHandleRepoLog(writer, request, params)
			case "commit":
				if len(segments) != sepIndex+5 {
					errorPage400(writer, params, "Incorrect number of parameters")
					return
				}
				if redirectNoDir(writer, request) {
					return
				}
				params["commit_id"] = segments[sepIndex+4]
				httpHandleRepoCommit(writer, request, params)
			case "contrib":
				if redirectDir(writer, request) {
					return
				}
				switch len(segments) {
				case sepIndex + 4:
					httpHandleRepoContribIndex(writer, request, params)
				case sepIndex + 5:
					params["mr_id"] = segments[sepIndex+4]
					httpHandleRepoContribOne(writer, request, params)
				default:
					errorPage400(writer, params, "Too many parameters")
				}
			default:
				errorPage404(writer, params)
				return
			}
		default:
			errorPage404(writer, params)
			return
		}
	}
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/url"
"path"
"strings" )
// These are all trivial functions that are used in HTML templates. // See resources.go. // firstLine returns the first line of a string.
func firstLine(s string) string {
	// Everything up to (but excluding) the first newline; the whole
	// string if there is none.
	if i := strings.IndexByte(s, '\n'); i >= 0 {
		return s[:i]
	}
	return s
}
// baseName returns the last element of a slash-separated path, for use in
// HTML templates. (Every other helper in this file carries a doc comment;
// this one was missing.)
func baseName(s string) string {
	return path.Base(s)
}
// pathEscape escapes the input as an URL path segment.
func pathEscape(s string) string {
	escaped := url.PathEscape(s)
	return escaped
}
// queryEscape escapes the input as an URL query segment.
func queryEscape(s string) string {
	escaped := url.QueryEscape(s)
	return escaped
}
// dereference dereferences a pointer.
func dereference[T any](p *T) T {
	value := *p
	return value
}
// dereferenceOrZero dereferences a pointer. If the pointer is nil, the zero // value of its associated type is returned instead.
func dereferenceOrZero[T any](p *T) T {
	// The zero value stands in for a nil pointer.
	var value T
	if p != nil {
		value = *p
	}
	return value
}
// minus subtracts two numbers.
func minus(a, b int) int {
	difference := a - b
	return difference
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"crypto/tls"
	"net"

	"go.lindenii.runxiyu.org/lindenii-common/clog"
	irc "go.lindenii.runxiyu.org/lindenii-irc"
)

// IRC send queues, initialized by ircBotLoop. ircSendBuffered is a buffered
// best-effort queue; ircSendDirectChan delivers a per-message error back to
// the sender.
var (
	ircSendBuffered   chan string
	ircSendDirectChan chan errorBack[string]
)

// errorBack pairs a payload with a channel on which the outcome of handling
// that payload is reported back.
type errorBack[T any] struct {
	content   T
	errorBack chan error
}

// ircBotSession runs a single IRC connection: it dials the server, registers,
// spawns a reader goroutine, and then services the two send queues until
// either side fails. It returns the error that ended the session.
func ircBotSession() error {
	var err error
	var underlyingConn net.Conn
	if config.IRC.TLS {
		underlyingConn, err = tls.Dial(config.IRC.Net, config.IRC.Addr, nil)
	} else {
		underlyingConn, err = net.Dial(config.IRC.Net, config.IRC.Addr)
	}
	if err != nil {
		return err
	}
	defer underlyingConn.Close()

	conn := irc.NewConn(underlyingConn)

	// All writes go through here so outgoing traffic is logged.
	logAndWriteLn := func(s string) (n int, err error) {
		clog.Debug("IRC tx: " + s)
		return conn.WriteString(s + "\r\n")
	}

	_, err = logAndWriteLn("NICK " + config.IRC.Nick)
	if err != nil {
		return err
	}
	_, err = logAndWriteLn("USER " + config.IRC.User + " 0 * :" + config.IRC.Gecos)
	if err != nil {
		return err
	}

	readLoopError := make(chan error)
	writeLoopAbort := make(chan struct{})
	// Reader goroutine: handles server-initiated messages (welcome, PING,
	// JOIN echoes) and reports fatal read errors to the main loop below.
	go func() {
		for {
			// Non-blocking abort check so the goroutine exits when
			// the write side has already torn the session down.
			select {
			case <-writeLoopAbort:
				return
			default:
			}

			msg, line, err := conn.ReadMessage()
			if err != nil {
				readLoopError <- err
				return
			}

			clog.Debug("IRC rx: " + line)

			switch msg.Command {
			case "001":
				// Registration complete; join the channel.
				_, err = logAndWriteLn("JOIN #chat")
				if err != nil {
					readLoopError <- err
					return
				}
			case "PING":
				_, err = logAndWriteLn("PONG :" + msg.Args[0])
				if err != nil {
					readLoopError <- err
					return
				}
			case "JOIN":
				c, ok := msg.Source.(irc.Client)
				if !ok {
					clog.Error("IRC server told us a non-client is joining a channel...")
				}
				if c.Nick != config.IRC.Nick {
					continue
				}
			default:
			}
		}
	}()

	// Writer loop: multiplex the buffered and direct send queues, and bail
	// out on any read or write error.
	for {
		select {
		case err = <-readLoopError:
			return err
		case line := <-ircSendBuffered:
			_, err = logAndWriteLn(line)
			if err != nil {
				// Best effort: try to requeue the message for
				// the next session, dropping it if the buffer
				// is full.
				select {
				case ircSendBuffered <- line:
				default:
					clog.Error("unable to requeue IRC message: " + line)
				}
				writeLoopAbort <- struct{}{}
				return err
			}
		case lineErrorBack := <-ircSendDirectChan:
			_, err = logAndWriteLn(lineErrorBack.content)
			lineErrorBack.errorBack <- err
			if err != nil {
				writeLoopAbort <- struct{}{}
				return err
			}
		}
	}
}
// ircSendDirect sends an IRC message directly to the connection and bypasses // the buffering system.
func ircSendDirect(s string) error { ech := make(chan error, 1) ircSendDirectChan <- errorBack[string]{ content: s, errorBack: ech, } return <-ech }
// TODO: Delay and warnings?
// ircBotLoop initializes the IRC send queues and then runs IRC sessions
// forever, starting a new one whenever the previous session returns.
func ircBotLoop() {
	ircSendBuffered = make(chan string, config.IRC.SendQ)
	ircSendDirectChan = make(chan errorBack[string])

	for {
		err := ircBotSession()
		clog.Error("IRC error: " + err.Error())
		// NOTE(review): reconnects immediately with no backoff; a
		// persistently failing server turns this into a busy loop.
		// See the TODO above.
	}
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import "iter" // iterSeqLimit returns an iterator equivalent to the supplied one, but stops // after n iterations. func iterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] { return func(yield func(T) bool) { var iterations uint for v := range s { if iterations > n-1 { return } if !yield(v) { return } iterations++ } } }
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"bytes"
	"html"
	"html/template"
	"strings"

	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/microcosm-cc/bluemonday"
	"github.com/niklasfasching/go-org/org"
	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/extension"
)

// markdownConverter is the shared goldmark instance used to render Markdown
// READMEs, with GitHub Flavored Markdown extensions enabled.
var markdownConverter = goldmark.New(goldmark.WithExtensions(extension.GFM))
// renderReadmeAtTree looks for README files in the supplied Git tree and // returns its filename and rendered (and sanitized) HTML.
func renderReadmeAtTree(tree *object.Tree) (readmeFilename string, readmeRenderedSafeHTML template.HTML) {
	var readmeRenderedUnsafe bytes.Buffer
	var readmeFile *object.File
	var readmeFileContents string
	var err error

	// Plain README: escape and wrap in <pre>, no rendering.
	if readmeFile, err = tree.File("README"); err == nil {
		if readmeFileContents, err = readmeFile.Contents(); err != nil {
			return "Error fetching README", escapeHTML("Unable to fetch contents of README: " + err.Error())
		}

		return "README", template.HTML("<pre>" + html.EscapeString(readmeFileContents) + "</pre>") //#nosec G203
	}

	// Markdown README: render with goldmark, then sanitize with
	// bluemonday's UGC policy before trusting it as HTML.
	if readmeFile, err = tree.File("README.md"); err == nil {
		if readmeFileContents, err = readmeFile.Contents(); err != nil {
			return "Error fetching README", escapeHTML("Unable to fetch contents of README: " + err.Error())
		}

		if err = markdownConverter.Convert(stringToBytes(readmeFileContents), &readmeRenderedUnsafe); err != nil {
			return "Error fetching README", escapeHTML("Unable to render README: " + err.Error())
		}

		return "README.md", template.HTML(bluemonday.UGCPolicy().SanitizeBytes(readmeRenderedUnsafe.Bytes())) //#nosec G203
	}

	// Org-mode README: render with go-org, then sanitize as above.
	if readmeFile, err = tree.File("README.org"); err == nil {
		if readmeFileContents, err = readmeFile.Contents(); err != nil {
			return "Error fetching README", escapeHTML("Unable to fetch contents of README: " + err.Error())
		}

		// NOTE(review): readmeFilename (a named return) is still ""
		// here, so the parser receives an empty path — presumably
		// only used for diagnostics; confirm with go-org docs.
		orgHTML, err := org.New().Parse(strings.NewReader(readmeFileContents), readmeFilename).Write(org.NewHTMLWriter())
		if err != nil {
			return "Error fetching README", escapeHTML("Unable to render README: " + err.Error())
		}

		return "README.org", template.HTML(bluemonday.UGCPolicy().Sanitize(orgHTML)) //#nosec G203
	}

	// No known README variant exists in this tree.
	return "", ""
}
// escapeHTML just escapes a string and wraps it in [template.HTML].
func escapeHTML(s string) template.HTML {
	escaped := html.EscapeString(s)
	return template.HTML(escaped) //#nosec G203
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "net/url" "strings" ) // We don't use path.Join because it collapses multiple slashes into one.
// genSSHRemoteURL generates SSH remote URLs from a given group path and repo // name.
func genSSHRemoteURL(groupPath []string, repoName string) string {
	// Trim any trailing slash from the configured root so we never emit
	// a double slash.
	root := strings.TrimSuffix(config.SSH.Root, "/")
	return root + "/" + segmentsToURL(groupPath) + "/:/repos/" + url.PathEscape(repoName)
}
// genHTTPRemoteURL generates HTTP remote URLs from a given group path and repo // name.
func genHTTPRemoteURL(groupPath []string, repoName string) string {
	// Trim any trailing slash from the configured root so we never emit
	// a double slash.
	root := strings.TrimSuffix(config.HTTP.Root, "/")
	return root + "/" + segmentsToURL(groupPath) + "/:/repos/" + url.PathEscape(repoName)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"embed"
	"html/template"
	"io/fs"
	"net/http"

	"github.com/tdewolff/minify/v2"
	"github.com/tdewolff/minify/v2/html"
)

// sourceFS embeds the license and a tarball of this program's own source.
//
//go:embed LICENSE source.tar.gz
var sourceFS embed.FS

// sourceHandler serves the embedded source tarball and license under
// /:/source/.
var sourceHandler = http.StripPrefix(
	"/:/source/",
	http.FileServer(http.FS(sourceFS)),
)

// resourcesFS embeds HTML templates, static assets, the hookc helper binary,
// and the manual pages.
//
//go:embed templates/* static/* hookc/hookc man/*.html man/*.txt man/*.css
var resourcesFS embed.FS

// templates holds all parsed and minified HTML templates; populated by
// loadTemplates.
var templates *template.Template
// loadTemplates minifies and loads HTML templates.
func loadTemplates() (err error) {
	// Minify template sources while preserving the {{ }} actions.
	minifier := minify.New()
	minifierOptions := html.Minifier{
		TemplateDelims:      [2]string{"{{", "}}"},
		KeepDefaultAttrVals: true,
	} //exhaustruct:ignore
	minifier.Add("text/html", &minifierOptions)

	// Register the helper functions available inside templates.
	templates = template.New("templates").Funcs(template.FuncMap{
		"first_line":        firstLine,
		"base_name":         baseName,
		"path_escape":       pathEscape,
		"query_escape":      queryEscape,
		"dereference_error": dereferenceOrZero[error],
		"minus":             minus,
	})

	// Walk the embedded templates directory, minifying and parsing every
	// file into the shared template set.
	err = fs.WalkDir(resourcesFS, "templates", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		if !d.IsDir() {
			content, err := fs.ReadFile(resourcesFS, path)
			if err != nil {
				return err
			}

			minified, err := minifier.Bytes("text/html", content)
			if err != nil {
				return err
			}

			_, err = templates.Parse(bytesToString(minified))
			if err != nil {
				return err
			}
		}
		return nil
	})
	return err
}

// Handlers for embedded static assets and manual pages; initialized in init.
var (
	staticHandler http.Handler
	manHandler    http.Handler
)
// This init sets up static and man handlers. The resulting handlers must be // used in the HTTP router, and do nothing unless called from elsewhere.
func init() { staticFS, err := fs.Sub(resourcesFS, "static") if err != nil { panic(err) } staticHandler = http.StripPrefix("/:/static/", http.FileServer(http.FS(staticFS))) manFS, err := fs.Sub(resourcesFS, "man") if err != nil { panic(err) } manHandler = http.StripPrefix("/:/man/", http.FileServer(http.FS(manFS))) }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "errors" "fmt" "os" "os/exec" gliderSSH "github.com/gliderlabs/ssh" "github.com/go-git/go-git/v5" "go.lindenii.runxiyu.org/lindenii-common/cmap" )
// packPass contains information known when handling incoming SSH connections // that then needs to be used in hook socket connection handlers. See hookc(1).
type packPass struct {
	session      gliderSSH.Session // the SSH session performing the push
	repo         *git.Repository   // the repo being pushed to, already opened
	pubkey       string            // authorized-keys form of the client's public key
	directAccess bool              // whether the pusher has direct (role-based) access
	repoPath     string            // repo path on the filesystem
	userID       int               // pusher's user ID; 0 when anonymous
	userType     string            // pusher's user type, e.g. "registered" or "pubkey_only"
	repoID       int               // repo's database ID
	groupPath    []string          // path of group names leading to the repo
	repoName     string            // repo's name within its group
	contribReq   string            // repo's contrib_requirements setting
}
// packPasses contains hook cookies mapped to their packPass.
// packPasses maps per-push hook cookies to their packPass, so the hook
// socket handler can recover the push's context.
var packPasses = cmap.Map[string, packPass]{}

// sshHandleRecvPack handles attempts to push to repos.
func sshHandleRecvPack(session gliderSSH.Session, pubkey, repoIdentifier string) (err error) {
	groupPath, repoName, repoID, repoPath, directAccess, contribReq, userType, userID, err := getRepoInfo2(session.Context(), repoIdentifier, pubkey)
	if err != nil {
		return err
	}

	repo, err := git.PlainOpen(repoPath)
	if err != nil {
		return err
	}

	repoConf, err := repo.Config()
	if err != nil {
		return err
	}

	// Refuse to run against a repo whose hooksPath does not point at our
	// hook executables, since the push pipeline depends on them.
	repoConfCore := repoConf.Raw.Section("core")
	if repoConfCore == nil {
		return errors.New("repository has no core section in config")
	}

	hooksPath := repoConfCore.OptionAll("hooksPath")
	if len(hooksPath) != 1 || hooksPath[0] != config.Hooks.Execs {
		return errors.New("repository has hooksPath set to an unexpected value")
	}

	// Users without direct access may still push depending on the repo's
	// contrib_requirements setting.
	if !directAccess {
		switch contribReq {
		case "closed":
			if !directAccess {
				return errors.New("you need direct access to push to this repo")
			}
		case "registered_user":
			if userType != "registered" {
				return errors.New("you need to be a registered user to push to this repo")
			}
		case "ssh_pubkey":
			fallthrough
		case "federated":
			if pubkey == "" {
				return errors.New("you need to have an SSH public key to push to this repo")
			}
			// Auto-register unknown keys as pubkey-only users.
			if userType == "" {
				userID, err = addUserSSH(session.Context(), pubkey)
				if err != nil {
					return err
				}
				fmt.Fprintln(session.Stderr(), "you are now registered as user ID", userID)
				userType = "pubkey_only"
			}
		case "public":
		default:
			panic("unknown contrib_requirements value " + contribReq)
		}
	}

	// The cookie lets the hook process running inside git-receive-pack
	// identify this push when it connects to the hook socket.
	cookie, err := randomUrlsafeStr(16)
	if err != nil {
		fmt.Fprintln(session.Stderr(), "Error while generating cookie:", err)
	}

	packPasses.Store(cookie, packPass{
		session:      session,
		pubkey:       pubkey,
		directAccess: directAccess,
		repoPath:     repoPath,
		userID:       userID,
		repoID:       repoID,
		groupPath:    groupPath,
		repoName:     repoName,
		repo:         repo,
		contribReq:   contribReq,
		userType:     userType,
	})
	defer packPasses.Delete(cookie)
	// The Delete won't execute until proc.Wait returns unless something
	// horribly wrong such as a panic occurs.

	// Run git-receive-pack wired directly to the SSH session's streams.
	proc := exec.CommandContext(session.Context(), "git-receive-pack", repoPath)
	proc.Env = append(os.Environ(),
		"LINDENII_FORGE_HOOKS_SOCKET_PATH="+config.Hooks.Socket,
		"LINDENII_FORGE_HOOKS_COOKIE="+cookie,
	)
	proc.Stdin = session
	proc.Stdout = session
	proc.Stderr = session.Stderr()

	if err = proc.Start(); err != nil {
		fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
		return err
	}

	err = proc.Wait()
	if err != nil {
		fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
	}

	return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"fmt"
	"net"
	"os"
	"strings"

	gliderSSH "github.com/gliderlabs/ssh"
	"go.lindenii.runxiyu.org/lindenii-common/ansiec"
	"go.lindenii.runxiyu.org/lindenii-common/clog"
	goSSH "golang.org/x/crypto/ssh"
)

// The server's SSH host public key in various forms, derived from the
// configured host key in serveSSH.
var (
	serverPubkeyString string
	serverPubkeyFP     string
	serverPubkey       goSSH.PublicKey
)
// serveSSH serves SSH on a [net.Listener]. The listener should generally be a // TCP listener, although AF_UNIX SOCK_STREAM listeners may be appropriate in // rare cases.
func serveSSH(listener net.Listener) error { var hostKeyBytes []byte var hostKey goSSH.Signer var err error var server *gliderSSH.Server if hostKeyBytes, err = os.ReadFile(config.SSH.Key); err != nil { return err } if hostKey, err = goSSH.ParsePrivateKey(hostKeyBytes); err != nil { return err } serverPubkey = hostKey.PublicKey() serverPubkeyString = bytesToString(goSSH.MarshalAuthorizedKey(serverPubkey)) serverPubkeyFP = goSSH.FingerprintSHA256(serverPubkey) server = &gliderSSH.Server{ Handler: func(session gliderSSH.Session) { clientPubkey := session.PublicKey() var clientPubkeyStr string if clientPubkey != nil { clientPubkeyStr = strings.TrimSuffix(bytesToString(goSSH.MarshalAuthorizedKey(clientPubkey)), "\n") } clog.Info("Incoming SSH: " + session.RemoteAddr().String() + " " + clientPubkeyStr + " " + session.RawCommand()) fmt.Fprintln(session.Stderr(), ansiec.Blue+"Lindenii Forge "+VERSION+", source at "+strings.TrimSuffix(config.HTTP.Root, "/")+"/:/source/"+ansiec.Reset+"\r") cmd := session.Command() if len(cmd) < 2 { fmt.Fprintln(session.Stderr(), "Insufficient arguments\r") return } switch cmd[0] { case "git-upload-pack": if len(cmd) > 2 { fmt.Fprintln(session.Stderr(), "Too many arguments\r") return } err = sshHandleUploadPack(session, clientPubkeyStr, cmd[1]) case "git-receive-pack": if len(cmd) > 2 { fmt.Fprintln(session.Stderr(), "Too many arguments\r") return } err = sshHandleRecvPack(session, clientPubkeyStr, cmd[1]) default: fmt.Fprintln(session.Stderr(), "Unsupported command: "+cmd[0]+"\r") return } if err != nil { fmt.Fprintln(session.Stderr(), err.Error()) return } }, PublicKeyHandler: func(_ gliderSSH.Context, _ gliderSSH.PublicKey) bool { return true }, KeyboardInteractiveHandler: func(_ gliderSSH.Context, _ goSSH.KeyboardInteractiveChallenge) bool { return true }, // It is intentional that we do not check any credentials and accept all connections. // This allows all users to connect and clone repositories. 
However, the public key // is passed to handlers, so e.g. the push handler could check the key and reject the // push if it needs to. } //exhaustruct:ignore server.AddHostKey(hostKey) if err = server.Serve(listener); err != nil { clog.Fatal(1, "Serving SSH: "+err.Error()) } return nil }
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/url"
	"strings"

	"go.lindenii.runxiyu.org/lindenii-common/ansiec"
)

// errIllegalSSHRepoPath is returned by getRepoInfo2 when an SSH repo path
// cannot be parsed (no ":" separator segment, too few segments after it, or
// an unsupported module type).
var errIllegalSSHRepoPath = errors.New("illegal SSH repo path")
// getRepoInfo2 also fetches repo information... it should be deprecated and // implemented in individual handlers.
func getRepoInfo2(ctx context.Context, sshPath, sshPubkey string) (groupPath []string, repoName string, repoID int, repoPath string, directAccess bool, contribReq, userType string, userID int, err error) { var segments []string var sepIndex int var moduleType, moduleName string segments = strings.Split(strings.TrimPrefix(sshPath, "/"), "/") for i, segment := range segments { var err error segments[i], err = url.PathUnescape(segment) if err != nil { return []string{}, "", 0, "", false, "", "", 0, err } } if segments[0] == ":" { return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath } sepIndex = -1 for i, part := range segments { if part == ":" { sepIndex = i break } } if segments[len(segments)-1] == "" { segments = segments[:len(segments)-1] } switch { case sepIndex == -1: return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath case len(segments) <= sepIndex+2: return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath } groupPath = segments[:sepIndex] moduleType = segments[sepIndex+1] moduleName = segments[sepIndex+2] repoName = moduleName switch moduleType { case "repos": _1, _2, _3, _4, _5, _6, _7 := getRepoInfo(ctx, groupPath, moduleName, sshPubkey) return groupPath, repoName, _1, _2, _3, _4, _5, _6, _7 default: return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath } }
// writeRedError formats a message per fmt.Sprintf conventions, wraps the
// entire line in red ANSI escape sequences, and writes it to w with a
// trailing newline. It's useful when producing error messages on SSH
// connections.
func writeRedError(w io.Writer, format string, args ...any) {
	msg := fmt.Sprintf(format, args...)
	fmt.Fprintln(w, ansiec.Red+msg+ansiec.Reset)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"errors"
	"net/http"
	"net/url"
	"strings"
)

var (
	// errDupRefSpec is returned by getParamRefTypeName when more than
	// one ref specifier appears in the query parameters.
	errDupRefSpec = errors.New("duplicate ref spec")
	// errNoRefSpec is returned by getParamRefTypeName when no ref
	// specifier appears in the query parameters.
	errNoRefSpec = errors.New("no ref spec")
)
// getParamRefTypeName looks at the query parameters in an HTTP request and // returns its ref name and type, if any.
func getParamRefTypeName(request *http.Request) (retRefType, retRefName string, err error) { rawQuery := request.URL.RawQuery queryValues, err := url.ParseQuery(rawQuery) if err != nil { return } done := false for _, refType := range []string{"commit", "branch", "tag"} { refName, ok := queryValues[refType] if ok { if done { err = errDupRefSpec return } done = true if len(refName) != 1 { err = errDupRefSpec return } retRefName = refName[0] retRefType = refType } } if !done { err = errNoRefSpec } return }
// parseReqURI parses an HTTP request URL, and returns a slice of path segments
// and the query parameters. It handles %2F correctly.
func parseReqURI(requestURI string) (segments []string, params url.Values, err error) {
	rawPath, rawQuery, _ := strings.Cut(requestURI, "?")

	// Decode each path segment individually so encoded slashes (%2F)
	// survive as literal slashes inside a single segment.
	segments = strings.Split(strings.TrimPrefix(rawPath, "/"), "/")
	for i, segment := range segments {
		segments[i], err = url.PathUnescape(segment)
		if err != nil {
			return
		}
	}

	params, err = url.ParseQuery(rawQuery)
	return
}
// redirectDir returns true and redirects the user to a version of the URL
// with a trailing slash, if and only if the request URL does not already
// have a trailing slash. Query parameters (and any fragment) are preserved.
func redirectDir(writer http.ResponseWriter, request *http.Request) bool {
	uri := request.RequestURI

	path, rest := uri, ""
	if i := strings.IndexAny(uri, "?#"); i != -1 {
		path, rest = uri[:i], uri[i:]
	}

	if strings.HasSuffix(path, "/") {
		return false
	}
	http.Redirect(writer, request, path+"/"+rest, http.StatusSeeOther)
	return true
}
// redirectNoDir returns true and redirects the user to a version of the URL
// without a trailing slash, if and only if the request URL has a trailing
// slash. Query parameters (and any fragment) are preserved.
func redirectNoDir(writer http.ResponseWriter, request *http.Request) bool {
	uri := request.RequestURI

	path, rest := uri, ""
	if i := strings.IndexAny(uri, "?#"); i != -1 {
		path, rest = uri[:i], uri[i:]
	}

	if !strings.HasSuffix(path, "/") {
		return false
	}
	http.Redirect(writer, request, strings.TrimSuffix(path, "/")+rest, http.StatusSeeOther)
	return true
}
// redirectUnconditionally unconditionally redirects the user back to the
// current page while preserving query parameters.
func redirectUnconditionally(writer http.ResponseWriter, request *http.Request) {
	// Splitting RequestURI at "?"/"#" and concatenating the halves back
	// together reproduces the URI unchanged, so redirect to it directly.
	http.Redirect(writer, request, request.RequestURI, http.StatusSeeOther)
}
// segmentsToURL joins URL segments to the path component of a URL.
// Each segment is escaped properly first.
//
// Fix: the previous implementation escaped the segments in place, mutating
// the caller's slice as a side effect; we now build a fresh slice instead.
func segmentsToURL(segments []string) string {
	escaped := make([]string, len(segments))
	for i, segment := range segments {
		escaped[i] = url.PathEscape(segment)
	}
	return strings.Join(escaped, "/")
}
// anyContain reports whether ss contains at least one string that contains
// the substring c.
func anyContain(ss []string, c string) bool {
	found := false
	for _, candidate := range ss {
		if strings.Contains(candidate, c) {
			found = true
			break
		}
	}
	return found
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "context" "github.com/jackc/pgx/v5" )
// addUserSSH adds a new user solely based on their SSH public key. // // TODO: Audit all users of this function.
func addUserSSH(ctx context.Context, pubkey string) (userID int, err error) { var txn pgx.Tx if txn, err = database.Begin(ctx); err != nil { return } defer func() { _ = txn.Rollback(ctx) }() if err = txn.QueryRow(ctx, `INSERT INTO users (type) VALUES ('pubkey_only') RETURNING id`).Scan(&userID); err != nil { return } if _, err = txn.Exec(ctx, `INSERT INTO ssh_public_keys (key_string, user_id) VALUES ($1, $2)`, pubkey, userID); err != nil { return } err = txn.Commit(ctx) return }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import "strings"
// sliceContainsNewlines reports whether at least one string in s contains a
// newline character.
func sliceContainsNewlines(s []string) bool {
	for i := range s {
		if strings.Contains(s[i], "\n") {
			return true
		}
	}
	return false
}