Lindenii Project Forge
Use log/slog instead of clog
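The change applies the same mechanical substitution in every file: clog's printf-style leveled calls become structured log/slog calls with key/value attributes, and clog.Fatal(1, ...) becomes slog.Error(...) followed by an explicit os.Exit(1), since log/slog has no fatal level. The sketch below only illustrates that pattern; the names and socket path in it are hypothetical, and the commit itself relies on slog's default handler rather than configuring one.

// Illustrative sketch of the clog -> slog substitution pattern used in this
// commit. Names and values here are hypothetical examples, not code from the diff.
package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	socket := "/var/run/forge/hooks.sock" // hypothetical path
	err := errors.New("example failure")

	// clog.Info("Listening hooks on unix " + socket) becomes a structured call:
	slog.Info("listening hooks on unix", "path", socket)

	// clog.Fatal(1, "Loading configuration: "+err.Error()) becomes an error log
	// plus an explicit exit, as slog has no fatal level:
	slog.Error("loading configuration", "error", err)
	os.Exit(1)
}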
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> // //go:build linux package main import ( "bytes" "context" "encoding/binary" "errors" "fmt" "io"
"log/slog"
"net" "os" "path/filepath" "strconv" "strings" "syscall" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/jackc/pgx/v5" "go.lindenii.runxiyu.org/forge/misc" "go.lindenii.runxiyu.org/lindenii-common/ansiec"
"go.lindenii.runxiyu.org/lindenii-common/clog"
) var ( errGetFD = errors.New("unable to get file descriptor") errGetUcred = errors.New("failed getsockopt") ) // hooksHandler handles a connection from hookc via the // unix socket. func hooksHandler(conn net.Conn) { var ctx context.Context var cancel context.CancelFunc var ucred *syscall.Ucred var err error var cookie []byte var packPass packPass var sshStderr io.Writer var hookRet byte defer conn.Close() ctx, cancel = context.WithCancel(context.Background()) defer cancel() // There aren't reasonable cases where someone would run this as // another user. if ucred, err = getUcred(conn); err != nil { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nUnable to get peer credentials: %v", err) return } uint32uid := uint32(os.Getuid()) //#nosec G115 if ucred.Uid != uint32uid { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nUID mismatch") return } cookie = make([]byte, 64) if _, err = conn.Read(cookie); err != nil { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nFailed to read cookie: %v", err) return } { var ok bool packPass, ok = packPasses.Load(misc.BytesToString(cookie)) if !ok { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nInvalid handler cookie") return } } sshStderr = packPass.session.Stderr() _, _ = sshStderr.Write([]byte{'\n'}) hookRet = func() byte { var argc64 uint64 if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil { writeRedError(sshStderr, "Failed to read argc: %v", err) return 1 } var args []string for range argc64 { var arg bytes.Buffer for { nextByte := make([]byte, 1) n, err := conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read arg: %v", err) return 1 } if nextByte[0] == 0 { break } arg.WriteByte(nextByte[0]) } args = append(args, arg.String()) } gitEnv := make(map[string]string) for { var envLine bytes.Buffer for { nextByte := make([]byte, 1) n, err := conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read environment variable: %v", err) return 1 } if nextByte[0] == 0 { break } envLine.WriteByte(nextByte[0]) } if envLine.Len() == 0 { break } kv := envLine.String() parts := strings.SplitN(kv, "=", 2) if len(parts) < 2 { writeRedError(sshStderr, "Invalid environment variable line: %v", kv) return 1 } gitEnv[parts[0]] = parts[1] } var stdin bytes.Buffer if _, err = io.Copy(&stdin, conn); err != nil { writeRedError(conn, "Failed to read to the stdin buffer: %v", err) } switch filepath.Base(args[0]) { case "pre-receive": if packPass.directAccess { return 0 } allOK := true for { var line, oldOID, rest, newIOID, refName string var found bool var oldHash, newHash plumbing.Hash var oldCommit, newCommit *object.Commit var pushOptCount int pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"]) if err != nil { writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err) return 1 } // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface? // Also it'd be nice to be able to combine users or whatever if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" { if pushOptCount == 0 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") return 1 } for pushOptIndex := range pushOptCount { pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)] if !ok { writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex) return 1 } if strings.HasPrefix(pushOpt, "fedid=") { fedUserID := strings.TrimPrefix(pushOpt, "fedid=") service, username, found := strings.Cut(fedUserID, ":") if !found { writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID) return 1 } ok, err := fedauth(ctx, packPass.userID, service, username, packPass.pubkey) if err != nil { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err) return 1 } if !ok { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID) return 1 } break } if pushOptIndex == pushOptCount-1 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu") return 1 } } } line, err = stdin.ReadString('\n') if errors.Is(err, io.EOF) { break } else if err != nil { writeRedError(sshStderr, "Failed to read pre-receive line: %v", err) return 1 } line = line[:len(line)-1] oldOID, rest, found = strings.Cut(line, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } newIOID, refName, found = strings.Cut(rest, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } if strings.HasPrefix(refName, "refs/heads/contrib/") { if allZero(oldOID) { // New branch fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) var newMRLocalID int if packPass.userID != 0 { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id", packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } else { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id", packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } if err != nil { writeRedError(sshStderr, "Error creating merge request: %v", err) return 1 } mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID) fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset) select { case ircSendBuffered <- "PRIVMSG #chat :New merge request at " + mergeRequestWebURL: default:
slog.Error("IRC SendQ exceeded")
} } else { // Existing contrib branch var existingMRUser int var isAncestor bool err = database.QueryRow(ctx, "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2", strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID, ).Scan(&existingMRUser) if err != nil { if errors.Is(err, pgx.ErrNoRows) { writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err) } else { writeRedError(sshStderr, "Error querying for existing merge request: %v", err) } return 1 } if existingMRUser == 0 { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)") continue } if existingMRUser != packPass.userID { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)") continue } oldHash = plumbing.NewHash(oldOID) if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil { writeRedError(sshStderr, "Daemon failed to get old commit: %v", err) return 1 } // Potential BUG: I'm not sure if new_commit is guaranteed to be // detectable as they haven't been merged into the main repo's // objects yet. But it seems to work, and I don't think there's // any reason for this to only work intermitently. newHash = plumbing.NewHash(newIOID) if newCommit, err = packPass.repo.CommitObject(newHash); err != nil { writeRedError(sshStderr, "Daemon failed to get new commit: %v", err) return 1 } if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil { writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err) return 1 } if !isAncestor { // TODO: Create MR snapshot ref instead allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)") continue } fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) } } else { // Non-contrib branch allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)") } } fmt.Fprintln(sshStderr) if allOK { fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)") return 0 } fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)") return 1 default: fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset) return 1 } }() fmt.Fprintln(sshStderr) _, _ = conn.Write([]byte{hookRet}) } // serveGitHooks handles connections on the specified network listener and // treats incoming connections as those from git hook handlers by spawning // sessions. The listener must be a SOCK_STREAM UNIX domain socket. The // function itself blocks. func serveGitHooks(listener net.Listener) error { for { conn, err := listener.Accept() if err != nil { return err } go hooksHandler(conn) } } // getUcred fetches connection credentials as a [syscall.Ucred] from a given // [net.Conn]. It panics when conn is not a [net.UnixConn]. func getUcred(conn net.Conn) (ucred *syscall.Ucred, err error) { unixConn := conn.(*net.UnixConn) var unixConnFD *os.File if unixConnFD, err = unixConn.File(); err != nil { return nil, errGetFD } defer unixConnFD.Close() if ucred, err = syscall.GetsockoptUcred(int(unixConnFD.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED); err != nil { return nil, errGetUcred } return ucred, nil } // allZero returns true if all runes in a given string are '0'. The comparison // is not constant time and must not be used in contexts where time-based side // channel attacks are a concern. 
func allZero(s string) bool {
	for _, r := range s {
		if r != '0' {
			return false
		}
	}
	return true
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> // //go:build !linux package main import ( "bytes" "context" "encoding/binary" "errors" "fmt" "io"
"log/slog"
"net" "path/filepath" "strconv" "strings" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/jackc/pgx/v5" "go.lindenii.runxiyu.org/forge/misc" "go.lindenii.runxiyu.org/lindenii-common/ansiec"
"go.lindenii.runxiyu.org/lindenii-common/clog"
) var errGetFD = errors.New("unable to get file descriptor") // hooksHandler handles a connection from hookc via the // unix socket. func hooksHandler(conn net.Conn) { var ctx context.Context var cancel context.CancelFunc var err error var cookie []byte var packPass packPass var sshStderr io.Writer var hookRet byte defer conn.Close() ctx, cancel = context.WithCancel(context.Background()) defer cancel() // TODO: Validate that the connection is from the right user. cookie = make([]byte, 64) if _, err = conn.Read(cookie); err != nil { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nFailed to read cookie: %v", err) return } { var ok bool packPass, ok = packPasses.Load(misc.BytesToString(cookie)) if !ok { if _, err = conn.Write([]byte{1}); err != nil { return } writeRedError(conn, "\nInvalid handler cookie") return } } sshStderr = packPass.session.Stderr() _, _ = sshStderr.Write([]byte{'\n'}) hookRet = func() byte { var argc64 uint64 if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil { writeRedError(sshStderr, "Failed to read argc: %v", err) return 1 } var args []string for range argc64 { var arg bytes.Buffer for { nextByte := make([]byte, 1) n, err := conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read arg: %v", err) return 1 } if nextByte[0] == 0 { break } arg.WriteByte(nextByte[0]) } args = append(args, arg.String()) } gitEnv := make(map[string]string) for { var envLine bytes.Buffer for { nextByte := make([]byte, 1) n, err := conn.Read(nextByte) if err != nil || n != 1 { writeRedError(sshStderr, "Failed to read environment variable: %v", err) return 1 } if nextByte[0] == 0 { break } envLine.WriteByte(nextByte[0]) } if envLine.Len() == 0 { break } kv := envLine.String() parts := strings.SplitN(kv, "=", 2) if len(parts) < 2 { writeRedError(sshStderr, "Invalid environment variable line: %v", kv) return 1 } gitEnv[parts[0]] = parts[1] } var stdin bytes.Buffer if _, err = io.Copy(&stdin, conn); err != nil { writeRedError(conn, "Failed to read to the stdin buffer: %v", err) } switch filepath.Base(args[0]) { case "pre-receive": if packPass.directAccess { return 0 } allOK := true for { var line, oldOID, rest, newIOID, refName string var found bool var oldHash, newHash plumbing.Hash var oldCommit, newCommit *object.Commit var pushOptCount int pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"]) if err != nil { writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err) return 1 } // TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface? // Also it'd be nice to be able to combine users or whatever if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" { if pushOptCount == 0 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. 
For example, git push -o fedid=sr.ht:runxiyu") return 1 } for pushOptIndex := range pushOptCount { pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)] if !ok { writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex) return 1 } if strings.HasPrefix(pushOpt, "fedid=") { fedUserID := strings.TrimPrefix(pushOpt, "fedid=") service, username, found := strings.Cut(fedUserID, ":") if !found { writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID) return 1 } ok, err := fedauth(ctx, packPass.userID, service, username, packPass.pubkey) if err != nil { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err) return 1 } if !ok { writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID) return 1 } break } if pushOptIndex == pushOptCount-1 { writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu") return 1 } } } line, err = stdin.ReadString('\n') if errors.Is(err, io.EOF) { break } else if err != nil { writeRedError(sshStderr, "Failed to read pre-receive line: %v", err) return 1 } line = line[:len(line)-1] oldOID, rest, found = strings.Cut(line, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } newIOID, refName, found = strings.Cut(rest, " ") if !found { writeRedError(sshStderr, "Invalid pre-receive line: %v", line) return 1 } if strings.HasPrefix(refName, "refs/heads/contrib/") { if allZero(oldOID) { // New branch fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) var newMRLocalID int if packPass.userID != 0 { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id", packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } else { err = database.QueryRow(ctx, "INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id", packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"), ).Scan(&newMRLocalID) } if err != nil { writeRedError(sshStderr, "Error creating merge request: %v", err) return 1 } mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID) fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset) select { case ircSendBuffered <- "PRIVMSG #chat :New merge request at " + mergeRequestWebURL: default:
slog.Error("IRC SendQ exceeded")
} } else { // Existing contrib branch var existingMRUser int var isAncestor bool err = database.QueryRow(ctx, "SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2", strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID, ).Scan(&existingMRUser) if err != nil { if errors.Is(err, pgx.ErrNoRows) { writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err) } else { writeRedError(sshStderr, "Error querying for existing merge request: %v", err) } return 1 } if existingMRUser == 0 { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)") continue } if existingMRUser != packPass.userID { allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)") continue } oldHash = plumbing.NewHash(oldOID) if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil { writeRedError(sshStderr, "Daemon failed to get old commit: %v", err) return 1 } // Potential BUG: I'm not sure if new_commit is guaranteed to be // detectable as they haven't been merged into the main repo's // objects yet. But it seems to work, and I don't think there's // any reason for this to only work intermitently. newHash = plumbing.NewHash(newIOID) if newCommit, err = packPass.repo.CommitObject(newHash); err != nil { writeRedError(sshStderr, "Daemon failed to get new commit: %v", err) return 1 } if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil { writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err) return 1 } if !isAncestor { // TODO: Create MR snapshot ref instead allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)") continue } fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName) } } else { // Non-contrib branch allOK = false fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)") } } fmt.Fprintln(sshStderr) if allOK { fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)") return 0 } fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)") return 1 default: fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset) return 1 } }() fmt.Fprintln(sshStderr) _, _ = conn.Write([]byte{hookRet}) } // serveGitHooks handles connections on the specified network listener and // treats incoming connections as those from git hook handlers by spawning // sessions. The listener must be a SOCK_STREAM UNIX domain socket. The // function itself blocks. func serveGitHooks(listener net.Listener) error { for { conn, err := listener.Accept() if err != nil { return err } go hooksHandler(conn) } } // allZero returns true if all runes in a given string are '0'. The comparison // is not constant time and must not be used in contexts where time-based side // channel attacks are a concern. func allZero(s string) bool { for _, r := range s { if r != '0' { return false } } return true }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "errors"
"log/slog"
"net/http" "net/url" "strconv" "strings" "github.com/jackc/pgx/v5"
"go.lindenii.runxiyu.org/lindenii-common/clog"
) type forgeHTTPRouter struct{} // ServeHTTP handles all incoming HTTP requests and routes them to the correct // location. // // TODO: This function is way too large. func (router *forgeHTTPRouter) ServeHTTP(writer http.ResponseWriter, request *http.Request) { var remoteAddr string if config.HTTP.ReverseProxy { remoteAddrs, ok := request.Header["X-Forwarded-For"] if ok && len(remoteAddrs) == 1 { remoteAddr = remoteAddrs[0] } else { remoteAddr = request.RemoteAddr } } else { remoteAddr = request.RemoteAddr }
slog.Info("incoming http", "addr", remoteAddr, "method", request.Method, "uri", request.RequestURI)
var segments []string var err error var sepIndex int params := make(map[string]any) if segments, _, err = parseReqURI(request.RequestURI); err != nil { errorPage400(writer, params, "Error parsing request URI: "+err.Error()) return } dirMode := false if segments[len(segments)-1] == "" { dirMode = true segments = segments[:len(segments)-1] } params["url_segments"] = segments params["dir_mode"] = dirMode params["global"] = globalData var userID int // 0 for none userID, params["username"], err = getUserFromRequest(request) params["user_id"] = userID if err != nil && !errors.Is(err, http.ErrNoCookie) && !errors.Is(err, pgx.ErrNoRows) { errorPage500(writer, params, "Error getting user info from request: "+err.Error()) return } if userID == 0 { params["user_id_string"] = "" } else { params["user_id_string"] = strconv.Itoa(userID) } for _, v := range segments { if strings.Contains(v, ":") { errorPage400Colon(writer, params) return } } if len(segments) == 0 { httpHandleIndex(writer, request, params) return } if segments[0] == "-" { if len(segments) < 2 { errorPage404(writer, params) return } else if len(segments) == 2 && redirectDir(writer, request) { return } switch segments[1] { case "static": staticHandler.ServeHTTP(writer, request) return case "source": sourceHandler.ServeHTTP(writer, request) return } } if segments[0] == "-" { switch segments[1] { case "login": httpHandleLogin(writer, request, params) return case "users": httpHandleUsers(writer, request, params) return default: errorPage404(writer, params) return } } sepIndex = -1 for i, part := range segments { if part == "-" { sepIndex = i break } } params["separator_index"] = sepIndex var groupPath []string var moduleType string var moduleName string if sepIndex > 0 { groupPath = segments[:sepIndex] } else { groupPath = segments } params["group_path"] = groupPath switch { case sepIndex == -1: if redirectDir(writer, request) { return } httpHandleGroupIndex(writer, request, params) case len(segments) == sepIndex+1: errorPage404(writer, params) return case len(segments) == sepIndex+2: errorPage404(writer, params) return default: moduleType = segments[sepIndex+1] moduleName = segments[sepIndex+2] switch moduleType { case "repos": params["repo_name"] = moduleName if len(segments) > sepIndex+3 { switch segments[sepIndex+3] { case "info": if err = httpHandleRepoInfo(writer, request, params); err != nil { errorPage500(writer, params, err.Error()) } return case "git-upload-pack": if err = httpHandleUploadPack(writer, request, params); err != nil { errorPage500(writer, params, err.Error()) } return } } if params["ref_type"], params["ref_name"], err = getParamRefTypeName(request); err != nil { if errors.Is(err, errNoRefSpec) { params["ref_type"] = "" } else { errorPage400(writer, params, "Error querying ref type: "+err.Error()) return } } if params["repo"], params["repo_description"], params["repo_id"], _, err = openRepo(request.Context(), groupPath, moduleName); err != nil { errorPage500(writer, params, "Error opening repo: "+err.Error()) return } repoURLRoot := "/" for _, part := range segments[:sepIndex+3] { repoURLRoot = repoURLRoot + url.PathEscape(part) + "/" } params["repo_url_root"] = repoURLRoot params["repo_patch_mailing_list"] = repoURLRoot[1:len(repoURLRoot)-1] + "@" + config.LMTP.Domain params["http_clone_url"] = genHTTPRemoteURL(groupPath, moduleName) params["ssh_clone_url"] = genSSHRemoteURL(groupPath, moduleName) if len(segments) == sepIndex+3 { if redirectDir(writer, request) { return } httpHandleRepoIndex(writer, request, params) 
return } repoFeature := segments[sepIndex+3] switch repoFeature { case "tree": if anyContain(segments[sepIndex+4:], "/") { errorPage400(writer, params, "Repo tree paths may not contain slashes in any segments") return } if dirMode { params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/" } else { params["rest"] = strings.Join(segments[sepIndex+4:], "/") } if len(segments) < sepIndex+5 && redirectDir(writer, request) { return } httpHandleRepoTree(writer, request, params) case "branches": if redirectDir(writer, request) { return } httpHandleRepoBranches(writer, request, params) return case "raw": if anyContain(segments[sepIndex+4:], "/") { errorPage400(writer, params, "Repo tree paths may not contain slashes in any segments") return } if dirMode { params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/" } else { params["rest"] = strings.Join(segments[sepIndex+4:], "/") } if len(segments) < sepIndex+5 && redirectDir(writer, request) { return } httpHandleRepoRaw(writer, request, params) case "log": if len(segments) > sepIndex+4 { errorPage400(writer, params, "Too many parameters") return } if redirectDir(writer, request) { return } httpHandleRepoLog(writer, request, params) case "commit": if len(segments) != sepIndex+5 { errorPage400(writer, params, "Incorrect number of parameters") return } if redirectNoDir(writer, request) { return } params["commit_id"] = segments[sepIndex+4] httpHandleRepoCommit(writer, request, params) case "contrib": if redirectDir(writer, request) { return } switch len(segments) { case sepIndex + 4: httpHandleRepoContribIndex(writer, request, params) case sepIndex + 5: params["mr_id"] = segments[sepIndex+4] httpHandleRepoContribOne(writer, request, params) default: errorPage400(writer, params, "Too many parameters") } default: errorPage404(writer, params) return } default: errorPage404(writer, params) return } } }
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"log/slog"
	"net/http"
)

// renderTemplate abstracts out the annoyances of reporting template rendering
// errors.
func renderTemplate(w http.ResponseWriter, templateName string, params map[string]any) {
	if err := templates.ExecuteTemplate(w, templateName, params); err != nil {
		http.Error(w, "error rendering template: "+err.Error(), http.StatusInternalServerError)
		slog.Error("error rendering template", "error", err.Error())
	}
}
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "crypto/tls"
"log/slog"
"net"
"go.lindenii.runxiyu.org/lindenii-common/clog"
irc "go.lindenii.runxiyu.org/lindenii-irc" ) var ( ircSendBuffered chan string ircSendDirectChan chan errorBack[string] ) type errorBack[T any] struct { content T errorBack chan error } func ircBotSession() error { var err error var underlyingConn net.Conn if config.IRC.TLS { underlyingConn, err = tls.Dial(config.IRC.Net, config.IRC.Addr, nil) } else { underlyingConn, err = net.Dial(config.IRC.Net, config.IRC.Addr) } if err != nil { return err } defer underlyingConn.Close() conn := irc.NewConn(underlyingConn) logAndWriteLn := func(s string) (n int, err error) {
slog.Debug("irc tx", "line", s)
return conn.WriteString(s + "\r\n") } _, err = logAndWriteLn("NICK " + config.IRC.Nick) if err != nil { return err } _, err = logAndWriteLn("USER " + config.IRC.User + " 0 * :" + config.IRC.Gecos) if err != nil { return err } readLoopError := make(chan error) writeLoopAbort := make(chan struct{}) go func() { for { select { case <-writeLoopAbort: return default: } msg, line, err := conn.ReadMessage() if err != nil { readLoopError <- err return }
slog.Debug("irc rx", "line", line)
switch msg.Command { case "001": _, err = logAndWriteLn("JOIN #chat") if err != nil { readLoopError <- err return } case "PING": _, err = logAndWriteLn("PONG :" + msg.Args[0]) if err != nil { readLoopError <- err return } case "JOIN": c, ok := msg.Source.(irc.Client) if !ok {
slog.Error("unable to convert source of JOIN to client")
} if c.Nick != config.IRC.Nick { continue } default: } } }() for { select { case err = <-readLoopError: return err case line := <-ircSendBuffered: _, err = logAndWriteLn(line) if err != nil { select { case ircSendBuffered <- line: default:
slog.Error("unable to requeue message", "line", line)
} writeLoopAbort <- struct{}{} return err } case lineErrorBack := <-ircSendDirectChan: _, err = logAndWriteLn(lineErrorBack.content) lineErrorBack.errorBack <- err if err != nil { writeLoopAbort <- struct{}{} return err } } } } // ircSendDirect sends an IRC message directly to the connection and bypasses // the buffering system. func ircSendDirect(s string) error { ech := make(chan error, 1) ircSendDirectChan <- errorBack[string]{ content: s, errorBack: ech, } return <-ech } // TODO: Delay and warnings? func ircBotLoop() { ircSendBuffered = make(chan string, config.IRC.SendQ) ircSendDirectChan = make(chan errorBack[string]) for { err := ircBotSession()
slog.Error("irc session error", "error", err)
} }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "errors" "flag" "log"
"log/slog"
"net" "net/http"
"os"
"os/exec" "syscall" "time"
"go.lindenii.runxiyu.org/lindenii-common/clog"
) func main() { configPath := flag.String( "config", "/etc/lindenii/forge.scfg", "path to configuration file", ) flag.Parse() if err := loadConfig(*configPath); err != nil {
slog.Error("loading configuration", "error", err) os.Exit(1)
} if err := deployHooks(); err != nil {
slog.Error("deploying hooks", "error", err) os.Exit(1)
} if err := loadTemplates(); err != nil {
slog.Error("loading templates", "error", err) os.Exit(1)
} if err := deployGit2D(); err != nil {
slog.Error("deploying git2d", "error", err) os.Exit(1)
} // Launch Git2D go func() { cmd := exec.Command(config.Git.DaemonPath, config.Git.Socket) //#nosec G204 cmd.Stderr = log.Writer() cmd.Stdout = log.Writer() if err := cmd.Run(); err != nil { panic(err) } }() // UNIX socket listener for hooks { hooksListener, err := net.Listen("unix", config.Hooks.Socket) if errors.Is(err, syscall.EADDRINUSE) {
slog.Warn("removing existing socket", "path", config.Hooks.Socket)
if err = syscall.Unlink(config.Hooks.Socket); err != nil {
slog.Error("removing existing socket", "path", config.Hooks.Socket, "error", err) os.Exit(1)
} if hooksListener, err = net.Listen("unix", config.Hooks.Socket); err != nil {
slog.Error("listening hooks", "error", err) os.Exit(1)
} } else if err != nil {
slog.Error("listening hooks", "error", err) os.Exit(1)
}
slog.Info("listening hooks on unix", "path", config.Hooks.Socket)
go func() { if err = serveGitHooks(hooksListener); err != nil {
slog.Error("serving hooks", "error", err) os.Exit(1)
} }() } // UNIX socket listener for LMTP { lmtpListener, err := net.Listen("unix", config.LMTP.Socket) if errors.Is(err, syscall.EADDRINUSE) {
slog.Warn("removing existing socket", "path", config.LMTP.Socket)
if err = syscall.Unlink(config.LMTP.Socket); err != nil {
slog.Error("removing existing socket", "path", config.LMTP.Socket, "error", err) os.Exit(1)
} if lmtpListener, err = net.Listen("unix", config.LMTP.Socket); err != nil {
slog.Error("listening LMTP", "error", err) os.Exit(1)
} } else if err != nil {
slog.Error("listening LMTP", "error", err) os.Exit(1)
}
slog.Info("listening LMTP on unix", "path", config.LMTP.Socket)
go func() { if err = serveLMTP(lmtpListener); err != nil {
slog.Error("serving LMTP", "error", err) os.Exit(1)
} }() } // SSH listener { sshListener, err := net.Listen(config.SSH.Net, config.SSH.Addr) if errors.Is(err, syscall.EADDRINUSE) && config.SSH.Net == "unix" {
slog.Warn("removing existing socket", "path", config.SSH.Addr)
if err = syscall.Unlink(config.SSH.Addr); err != nil {
slog.Error("removing existing socket", "path", config.SSH.Addr, "error", err) os.Exit(1)
} if sshListener, err = net.Listen(config.SSH.Net, config.SSH.Addr); err != nil {
slog.Error("listening SSH", "error", err) os.Exit(1)
} } else if err != nil {
slog.Error("listening SSH", "error", err) os.Exit(1)
}
slog.Info("listening SSH on", "net", config.SSH.Net, "addr", config.SSH.Addr)
go func() { if err = serveSSH(sshListener); err != nil {
slog.Error("serving SSH", "error", err) os.Exit(1)
} }() } // HTTP listener { httpListener, err := net.Listen(config.HTTP.Net, config.HTTP.Addr) if errors.Is(err, syscall.EADDRINUSE) && config.HTTP.Net == "unix" {
slog.Warn("removing existing socket", "path", config.HTTP.Addr)
if err = syscall.Unlink(config.HTTP.Addr); err != nil {
slog.Error("removing existing socket", "path", config.HTTP.Addr, "error", err) os.Exit(1)
} if httpListener, err = net.Listen(config.HTTP.Net, config.HTTP.Addr); err != nil {
slog.Error("listening HTTP", "error", err) os.Exit(1)
} } else if err != nil {
slog.Error("listening HTTP", "error", err) os.Exit(1)
} server := http.Server{ Handler: &forgeHTTPRouter{}, ReadTimeout: time.Duration(config.HTTP.ReadTimeout) * time.Second, WriteTimeout: time.Duration(config.HTTP.ReadTimeout) * time.Second, IdleTimeout: time.Duration(config.HTTP.ReadTimeout) * time.Second, } //exhaustruct:ignore
slog.Info("listening HTTP on", "net", config.HTTP.Net, "addr", config.HTTP.Addr)
go func() { if err = server.Serve(httpListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
slog.Error("serving HTTP", "error", err) os.Exit(1)
} }() } // IRC bot go ircBotLoop() select {} }
// SPDX-License-Identifier: AGPL-3.0-only // SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org> package main import ( "fmt"
"log/slog"
"net" "os" "strings" gliderSSH "github.com/gliderlabs/ssh" "go.lindenii.runxiyu.org/forge/misc" "go.lindenii.runxiyu.org/lindenii-common/ansiec"
"go.lindenii.runxiyu.org/lindenii-common/clog"
goSSH "golang.org/x/crypto/ssh" ) var ( serverPubkeyString string serverPubkeyFP string serverPubkey goSSH.PublicKey ) // serveSSH serves SSH on a [net.Listener]. The listener should generally be a // TCP listener, although AF_UNIX SOCK_STREAM listeners may be appropriate in // rare cases. func serveSSH(listener net.Listener) error { var hostKeyBytes []byte var hostKey goSSH.Signer var err error var server *gliderSSH.Server if hostKeyBytes, err = os.ReadFile(config.SSH.Key); err != nil { return err } if hostKey, err = goSSH.ParsePrivateKey(hostKeyBytes); err != nil { return err } serverPubkey = hostKey.PublicKey() serverPubkeyString = misc.BytesToString(goSSH.MarshalAuthorizedKey(serverPubkey)) serverPubkeyFP = goSSH.FingerprintSHA256(serverPubkey) server = &gliderSSH.Server{ Handler: func(session gliderSSH.Session) { clientPubkey := session.PublicKey() var clientPubkeyStr string if clientPubkey != nil { clientPubkeyStr = strings.TrimSuffix(misc.BytesToString(goSSH.MarshalAuthorizedKey(clientPubkey)), "\n") }
slog.Info("incoming ssh", "addr", session.RemoteAddr().String(), "key", clientPubkeyStr, "command", session.RawCommand())
fmt.Fprintln(session.Stderr(), ansiec.Blue+"Lindenii Forge "+VERSION+", source at "+strings.TrimSuffix(config.HTTP.Root, "/")+"/-/source/"+ansiec.Reset+"\r") cmd := session.Command() if len(cmd) < 2 { fmt.Fprintln(session.Stderr(), "Insufficient arguments\r") return } switch cmd[0] { case "git-upload-pack": if len(cmd) > 2 { fmt.Fprintln(session.Stderr(), "Too many arguments\r") return } err = sshHandleUploadPack(session, clientPubkeyStr, cmd[1]) case "git-receive-pack": if len(cmd) > 2 { fmt.Fprintln(session.Stderr(), "Too many arguments\r") return } err = sshHandleRecvPack(session, clientPubkeyStr, cmd[1]) default: fmt.Fprintln(session.Stderr(), "Unsupported command: "+cmd[0]+"\r") return } if err != nil { fmt.Fprintln(session.Stderr(), err.Error()) return } }, PublicKeyHandler: func(_ gliderSSH.Context, _ gliderSSH.PublicKey) bool { return true }, KeyboardInteractiveHandler: func(_ gliderSSH.Context, _ goSSH.KeyboardInteractiveChallenge) bool { return true }, // It is intentional that we do not check any credentials and accept all connections. // This allows all users to connect and clone repositories. However, the public key // is passed to handlers, so e.g. the push handler could check the key and reject the // push if it needs to. } //exhaustruct:ignore server.AddHostKey(hostKey) if err = server.Serve(listener); err != nil {
slog.Error("error serving SSH", "error", err.Error()) os.Exit(1)
} return nil }