Lindenii Project Forge

server: Lindenii Forge's main backend daemon

Commit ID:      da1d8f4e7c332c7109427915e6459b10209cedce
Author:         Runxi Yu <me@runxiyu.org>
Author date:    Sun, 06 Apr 2025 09:26:46 +0800
Committer:      Runxi Yu <me@runxiyu.org>
Committer date: Sun, 06 Apr 2025 09:27:53 +0800

Move the Go stuff to ./forged/
# SPDX-License-Identifier: AGPL-3.0-only
# SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
#
# TODO: This Makefile utilizes a lot of GNU extensions. Some of them are
# unfortunately difficult to avoid as POSIX Make's pattern rules are not
# sufficiently expressive. This needs to be fixed sometime (or we might move to
# some other build system).
#

.PHONY: clean

CFLAGS = -Wall -Wextra -pedantic -std=c99 -D_GNU_SOURCE
MAN_PAGES = lindenii-forge.5 lindenii-forge-hookc.1 lindenii-forge.1 lindenii-forge-mail.5

VERSION = $(shell git describe --tags --always --dirty)
SOURCE_FILES = $(shell git ls-files)
EMBED = git2d/git2d hookc/hookc source.tar.gz $(wildcard LICENSE*) $(wildcard static/*) $(wildcard templates/*)
EMBED_ = $(EMBED:%=forged/internal/embed/%)

forge: $(EMBED_) $(SOURCE_FILES)
	CGO_ENABLED=0 go build -o forge -ldflags '-extldflags "-f no-PIC -static" -X "go.lindenii.runxiyu.org/forge.version=$(VERSION)"' -tags 'osusergo netgo static_build' ./forged/cmd/forge

utils/colb:

hookc/hookc:

git2d/git2d: $(wildcard git2d/*.c)
	$(CC) $(CFLAGS) -o git2d/git2d $^ $(shell pkg-config --cflags --libs libgit2) -lpthread

clean:
	rm -rf forge utils/colb hookc/hookc git2d/git2d source.tar.gz */*.o

source.tar.gz: $(SOURCE_FILES)
	rm -f source.tar.gz
	git ls-files -z | xargs -0 tar -czf source.tar.gz

forged/internal/embed/%: %
	@mkdir -p $(shell dirname $@)
	@cp $^ $@
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package main

import (
	"flag"

	"go.lindenii.runxiyu.org/forge/internal/unsorted"
	"go.lindenii.runxiyu.org/forge/forged/internal/unsorted"
)

func main() {
	configPath := flag.String(
		"config",
		"/etc/lindenii/forge.scfg",
		"path to configuration file",
	)
	flag.Parse()

	s, err := unsorted.NewServer(*configPath)
	if err != nil {
		panic(err)
	}

	panic(s.Run())
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package ansiec provides definitions for ANSI escape sequences.
package ansiec
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package ansiec

const (
	Black   = "\x1b[30m"
	Red     = "\x1b[31m"
	Green   = "\x1b[32m"
	Yellow  = "\x1b[33m"
	Blue    = "\x1b[34m"
	Magenta = "\x1b[35m"
	Cyan    = "\x1b[36m"
	White   = "\x1b[37m"
)

const (
	BrightBlack   = "\x1b[30;1m"
	BrightRed     = "\x1b[31;1m"
	BrightGreen   = "\x1b[32;1m"
	BrightYellow  = "\x1b[33;1m"
	BrightBlue    = "\x1b[34;1m"
	BrightMagenta = "\x1b[35;1m"
	BrightCyan    = "\x1b[36;1m"
	BrightWhite   = "\x1b[37;1m"
)
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package ansiec

const Reset = "\x1b[0m"
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package ansiec

const (
	Bold      = "\x1b[1m"
	Underline = "\x1b[4m"
	Reversed  = "\x1b[7m"
	Italic    = "\x1b[3m"
)
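
// A minimal usage sketch (not part of this package): the constants above
// compose by plain string concatenation, with Reset restoring the default
// rendition.
//
//	fmt.Println(ansiec.Bold + ansiec.Red + "error:" + ansiec.Reset + " something failed")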
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package database provides stubs and wrappers for databases.
package database

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"
)

// Database wraps a pgx connection pool.
type Database struct {
	*pgxpool.Pool
}

// Open creates a pgx connection pool using the given connection string.
func Open(connString string) (Database, error) {
	db, err := pgxpool.New(context.Background(), connString)
	return Database{db}, err
}
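
// A minimal usage sketch; the connection string below is an illustrative
// placeholder, not a value shipped with the forge.
//
//	db, err := database.Open("postgres://forge@localhost:5432/forge")
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()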
/source.tar.gz
/hookc/hookc
/git2d/git2d
/static
/templates
/LICENSE*
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package embed provides embedded filesystems created in build-time.
package embed

import "embed"

//go:embed LICENSE* source.tar.gz
var Source embed.FS

//go:embed templates/* static/*
//go:embed hookc/hookc git2d/git2d
var Resources embed.FS
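
// A minimal usage sketch, assuming a template file name that may differ in
// the real tree:
//
//	data, err := embed.Resources.ReadFile("templates/index.tmpl")
//	if err != nil {
//		// handle error
//	}
//	_ = data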
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package git2c provides routines to interact with the git2d backend daemon.
package git2c

import (
	"fmt"
	"net"

	"git.sr.ht/~sircmpwn/go-bare"
)

// Client is a connection to the git2d backend daemon.
type Client struct {
	SocketPath string
	conn       net.Conn
	writer     *bare.Writer
	reader     *bare.Reader
}

// NewClient connects to the git2d unix domain socket at socketPath and
// returns a Client wrapping the connection.
func NewClient(socketPath string) (*Client, error) {
	conn, err := net.Dial("unix", socketPath)
	if err != nil {
		return nil, fmt.Errorf("git2d connection failed: %w", err)
	}

	writer := bare.NewWriter(conn)
	reader := bare.NewReader(conn)

	return &Client{
		SocketPath: socketPath,
		conn:       conn,
		writer:     writer,
		reader:     reader,
	}, nil
}

// Close closes the underlying connection to git2d, if any.
func (c *Client) Close() error {
	if c.conn != nil {
		return c.conn.Close()
	}
	return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package git2c

import (
	"encoding/hex"
	"errors"
	"fmt"
	"io"
)

// CmdIndex requests an index of the repository at repoPath, returning its
// commits and README contents.
func (c *Client) CmdIndex(repoPath string) ([]Commit, *FilenameContents, error) {
	if err := c.writer.WriteData([]byte(repoPath)); err != nil {
		return nil, nil, fmt.Errorf("sending repo path failed: %w", err)
	}
	if err := c.writer.WriteUint(1); err != nil {
		return nil, nil, fmt.Errorf("sending command failed: %w", err)
	}

	status, err := c.reader.ReadUint()
	if err != nil {
		return nil, nil, fmt.Errorf("reading status failed: %w", err)
	}
	if status != 0 {
		return nil, nil, fmt.Errorf("git2d error: %d", status)
	}

	// README
	readmeRaw, err := c.reader.ReadData()
	if err != nil {
		readmeRaw = nil
	}

	readmeFilename := "README.md" // TODO
	readme := &FilenameContents{Filename: readmeFilename, Content: readmeRaw}

	// Commits
	var commits []Commit
	for {
		id, err := c.reader.ReadData()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nil, nil, fmt.Errorf("reading commit ID failed: %w", err)
		}
		title, _ := c.reader.ReadData()
		authorName, _ := c.reader.ReadData()
		authorEmail, _ := c.reader.ReadData()
		authorDate, _ := c.reader.ReadData()

		commits = append(commits, Commit{
			Hash:    hex.EncodeToString(id),
			Author:  string(authorName),
			Email:   string(authorEmail),
			Date:    string(authorDate),
			Message: string(title),
		})
	}

	return commits, readme, nil
}
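
// A minimal usage sketch; the socket and repository paths are illustrative
// placeholders.
//
//	client, err := git2c.NewClient("/run/lindenii/git2d.sock")
//	if err != nil {
//		// handle error
//	}
//	defer client.Close()
//
//	commits, readme, err := client.CmdIndex("/srv/git/example.git")
//	if err != nil {
//		// handle error
//	}
//	for _, c := range commits {
//		fmt.Println(c.Hash, c.Message)
//	}
//	_ = readme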
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package git2c

import (
	"errors"
	"fmt"
	"io"
)

// CmdTreeRaw fetches the object at pathSpec within the repository at
// repoPath, returning either a list of tree entries or raw blob contents.
func (c *Client) CmdTreeRaw(repoPath, pathSpec string) ([]TreeEntry, string, error) {
	if err := c.writer.WriteData([]byte(repoPath)); err != nil {
		return nil, "", fmt.Errorf("sending repo path failed: %w", err)
	}
	if err := c.writer.WriteUint(2); err != nil {
		return nil, "", fmt.Errorf("sending command failed: %w", err)
	}
	if err := c.writer.WriteData([]byte(pathSpec)); err != nil {
		return nil, "", fmt.Errorf("sending path failed: %w", err)
	}

	status, err := c.reader.ReadUint()
	if err != nil {
		return nil, "", fmt.Errorf("reading status failed: %w", err)
	}

	switch status {
	case 0:
		kind, err := c.reader.ReadUint()
		if err != nil {
			return nil, "", fmt.Errorf("reading object kind failed: %w", err)
		}

		switch kind {
		case 1:
			// Tree
			count, err := c.reader.ReadUint()
			if err != nil {
				return nil, "", fmt.Errorf("reading entry count failed: %w", err)
			}

			var files []TreeEntry
			for range count {
				typeCode, err := c.reader.ReadUint()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry type: %w", err)
				}
				mode, err := c.reader.ReadUint()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry mode: %w", err)
				}
				size, err := c.reader.ReadUint()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry size: %w", err)
				}
				name, err := c.reader.ReadData()
				if err != nil {
					return nil, "", fmt.Errorf("error reading entry name: %w", err)
				}

				files = append(files, TreeEntry{
					Name:      string(name),
					Mode:      fmt.Sprintf("%06o", mode),
					Size:      size,
					IsFile:    typeCode == 2,
					IsSubtree: typeCode == 1,
				})
			}

			return files, "", nil

		case 2:
			// Blob
			content, err := c.reader.ReadData()
			if err != nil && !errors.Is(err, io.EOF) {
				return nil, "", fmt.Errorf("error reading file content: %w", err)
			}

			return nil, string(content), nil

		default:
			return nil, "", fmt.Errorf("unknown kind: %d", kind)
		}

	case 3:
		return nil, "", fmt.Errorf("path not found: %s", pathSpec)

	default:
		return nil, "", fmt.Errorf("unknown status code: %d", status)
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package git2c

// Commit describes a single commit as reported by git2d.
type Commit struct {
	Hash    string
	Author  string
	Email   string
	Date    string
	Message string
}

// FilenameContents pairs a filename with its raw contents.
type FilenameContents struct {
	Filename string
	Content  []byte
}

// TreeEntry describes a single entry of a Git tree as reported by git2d.
type TreeEntry struct {
	Name      string
	Mode      string
	Size      uint64
	IsFile    bool
	IsSubtree bool
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package irc provides basic IRC bot functionality.
package irc

import (
	"crypto/tls"
	"log/slog"
	"net"

	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
	irc "go.lindenii.runxiyu.org/lindenii-irc"
)

// Config describes the IRC connection and identity settings for the bot.
type Config struct {
	Net   string `scfg:"net"`
	Addr  string `scfg:"addr"`
	TLS   bool   `scfg:"tls"`
	SendQ uint   `scfg:"sendq"`
	Nick  string `scfg:"nick"`
	User  string `scfg:"user"`
	Gecos string `scfg:"gecos"`
}

// Bot is an IRC bot that maintains a single connection and offers buffered
// and direct message sending.
type Bot struct {
	config            *Config
	ircSendBuffered   chan string
	ircSendDirectChan chan misc.ErrorBack[string]
}

// NewBot returns a new Bot with the given configuration.
func NewBot(c *Config) (b *Bot) {
	b = &Bot{
		config: c,
	}
	return
}

// Connect establishes one IRC session and services the send queues until the
// session fails.
func (b *Bot) Connect() error {
	var err error
	var underlyingConn net.Conn
	if b.config.TLS {
		underlyingConn, err = tls.Dial(b.config.Net, b.config.Addr, nil)
	} else {
		underlyingConn, err = net.Dial(b.config.Net, b.config.Addr)
	}
	if err != nil {
		return err
	}
	defer underlyingConn.Close()

	conn := irc.NewConn(underlyingConn)

	logAndWriteLn := func(s string) (n int, err error) {
		slog.Debug("irc tx", "line", s)
		return conn.WriteString(s + "\r\n")
	}

	_, err = logAndWriteLn("NICK " + b.config.Nick)
	if err != nil {
		return err
	}
	_, err = logAndWriteLn("USER " + b.config.User + " 0 * :" + b.config.Gecos)
	if err != nil {
		return err
	}

	readLoopError := make(chan error)
	writeLoopAbort := make(chan struct{})
	go func() {
		for {
			select {
			case <-writeLoopAbort:
				return
			default:
			}

			msg, line, err := conn.ReadMessage()
			if err != nil {
				readLoopError <- err
				return
			}

			slog.Debug("irc rx", "line", line)

			switch msg.Command {
			case "001":
				_, err = logAndWriteLn("JOIN #chat")
				if err != nil {
					readLoopError <- err
					return
				}
			case "PING":
				_, err = logAndWriteLn("PONG :" + msg.Args[0])
				if err != nil {
					readLoopError <- err
					return
				}
			case "JOIN":
				c, ok := msg.Source.(irc.Client)
				if !ok {
					slog.Error("unable to convert source of JOIN to client")
				}
				if c.Nick != b.config.Nick {
					continue
				}
			default:
			}
		}
	}()

	for {
		select {
		case err = <-readLoopError:
			return err
		case line := <-b.ircSendBuffered:
			_, err = logAndWriteLn(line)
			if err != nil {
				select {
				case b.ircSendBuffered <- line:
				default:
					slog.Error("unable to requeue message", "line", line)
				}
				writeLoopAbort <- struct{}{}
				return err
			}
		case lineErrorBack := <-b.ircSendDirectChan:
			_, err = logAndWriteLn(lineErrorBack.Content)
			lineErrorBack.ErrorChan <- err
			if err != nil {
				writeLoopAbort <- struct{}{}
				return err
			}
		}
	}
}

// SendDirect sends an IRC message directly to the connection and bypasses
// the buffering system.
func (b *Bot) SendDirect(line string) error {
	ech := make(chan error, 1)

	b.ircSendDirectChan <- misc.ErrorBack[string]{
		Content:   line,
		ErrorChan: ech,
	}

	return <-ech
}

// Send queues a message on the buffered send queue, logging an error and
// dropping the line if the queue is full.
func (b *Bot) Send(line string) {
	select {
	case b.ircSendBuffered <- line:
	default:
		slog.Error("irc sendq full", "line", line)
	}
}

// ConnectLoop initializes the send queues and then reconnects indefinitely,
// logging each session error.
//
// TODO: Delay and warnings?
func (b *Bot) ConnectLoop() {
	b.ircSendBuffered = make(chan string, b.config.SendQ)
	b.ircSendDirectChan = make(chan misc.ErrorBack[string])

	for {
		err := b.Connect()
		slog.Error("irc session error", "error", err)
	}
}
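
// A minimal wiring sketch; all configuration values below are illustrative
// placeholders.
//
//	bot := irc.NewBot(&irc.Config{
//		Net:   "tcp",
//		Addr:  "irc.example.org:6697",
//		TLS:   true,
//		SendQ: 128,
//		Nick:  "forge",
//		User:  "forge",
//		Gecos: "Lindenii Forge bot",
//	})
//	go bot.ConnectLoop()
//	bot.Send("PRIVMSG #chat :new commit pushed")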
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package misc

// ErrorBack wraps a value of type T together with a channel on which the
// receiver reports the error that resulted from handling the value.
type ErrorBack[T any] struct {
	Content   T
	ErrorChan chan error
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package misc

import (
	"io"
	"io/fs"
	"os"
)

// DeployBinary copies the contents of src to the path dst and marks dst
// executable.
func DeployBinary(src fs.File, dst string) (err error) {
	var dstFile *os.File
	if dstFile, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755); err != nil {
		return err
	}
	defer dstFile.Close()
	_, err = io.Copy(dstFile, src)
	return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package misc

import "iter"

// IterSeqLimit returns an iterator equivalent to the supplied one, but stops
// after n iterations.
func IterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] {
	return func(yield func(T) bool) {
		var iterations uint
		for v := range s {
			if iterations >= n { // >= avoids unsigned underflow when n is 0
				return
			}
			if !yield(v) {
				return
			}
			iterations++
		}
	}
}
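
// A minimal usage sketch: limit an arbitrary sequence to its first three
// values (the sequence below is illustrative).
//
//	seq := slices.Values([]int{1, 2, 3, 4, 5})
//	for v := range misc.IterSeqLimit(seq, 3) {
//		fmt.Println(v) // prints 1, 2, 3
//	}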
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package misc provides miscellaneous functions and other definitions.
package misc

import "strings"

// SliceContainsNewlines returns true if and only if the given slice contains
// one or more strings that contain newlines.
func SliceContainsNewlines(s []string) bool {
	for _, v := range s {
		if strings.Contains(v, "\n") {
			return true
		}
	}
	return false
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package misc

// FirstOrPanic returns v, panicking if err is non-nil.
func FirstOrPanic[T any](v T, err error) T {
	if err != nil {
		panic(err)
	}
	return v
}

// NoneOrPanic panics if err is non-nil.
func NoneOrPanic(err error) {
	if err != nil {
		panic(err)
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package misc

import (
	"net/url"
	"strings"
)

// These are all trivial functions that are intended to be used in HTML
// templates.

// FirstLine returns the first line of a string.
func FirstLine(s string) string {
	before, _, _ := strings.Cut(s, "\n")
	return before
}

// PathEscape escapes the input as an URL path segment.
func PathEscape(s string) string {
	return url.PathEscape(s)
}

// QueryEscape escapes the input as an URL query segment.
func QueryEscape(s string) string {
	return url.QueryEscape(s)
}

// Dereference dereferences a pointer.
func Dereference[T any](p *T) T {
	return *p
}

// DereferenceOrZero dereferences a pointer. If the pointer is nil, the zero
// value of its associated type is returned instead.
func DereferenceOrZero[T any](p *T) T {
	if p != nil {
		return *p
	}
	var z T
	return z
}

// Minus subtracts two numbers.
func Minus(a, b int) int {
	return a - b
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package misc

import "unsafe"

// StringToBytes converts a string to a byte slice without copying the string.
// Memory is borrowed from the string.
// The resulting byte slice must not be modified in any form.
func StringToBytes(s string) (bytes []byte) {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

// BytesToString converts a byte slice to a string without copying the bytes.
// Memory is borrowed from the byte slice.
// The source byte slice must not be modified.
func BytesToString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package misc

import (
	"errors"
	"net/http"
	"net/url"
	"strings"
)

var (
	ErrDupRefSpec = errors.New("duplicate ref spec")
	ErrNoRefSpec  = errors.New("no ref spec")
)

// GetParamRefTypeName looks at the query parameters in an HTTP request and
// returns the ref type and name, if any.
func GetParamRefTypeName(request *http.Request) (retRefType, retRefName string, err error) {
	rawQuery := request.URL.RawQuery
	queryValues, err := url.ParseQuery(rawQuery)
	if err != nil {
		return
	}
	done := false
	for _, refType := range []string{"commit", "branch", "tag"} {
		refName, ok := queryValues[refType]
		if ok {
			if done {
				err = ErrDupRefSpec
				return
			}
			done = true
			if len(refName) != 1 {
				err = ErrDupRefSpec
				return
			}
			retRefName = refName[0]
			retRefType = refType
		}
	}
	if !done {
		err = ErrNoRefSpec
	}
	return
}

// ParseReqURI parses an HTTP request URL, and returns a slice of path segments
// and the query parameters. It handles %2F correctly.
func ParseReqURI(requestURI string) (segments []string, params url.Values, err error) {
	path, paramsStr, _ := strings.Cut(requestURI, "?")

	segments, err = PathToSegments(path)
	if err != nil {
		return
	}

	params, err = url.ParseQuery(paramsStr)
	return
}

// PathToSegments splits a slash-separated path into its percent-decoded
// segments.
func PathToSegments(path string) (segments []string, err error) {
	segments = strings.Split(strings.TrimPrefix(path, "/"), "/")

	for i, segment := range segments {
		segments[i], err = url.PathUnescape(segment)
		if err != nil {
			return
		}
	}

	return
}
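
// A minimal sketch of how %2F inside a segment is preserved (the URI below is
// illustrative):
//
//	segments, params, err := misc.ParseReqURI("/grp/repo%2Fname/?branch=main")
//	// segments == []string{"grp", "repo/name", ""}
//	// params.Get("branch") == "main"
//	_ = err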

// RedirectDir returns true and redirects the user to a version of the URL with
// a trailing slash, if and only if the request URL does not already have a
// trailing slash.
func RedirectDir(writer http.ResponseWriter, request *http.Request) bool {
	requestURI := request.RequestURI

	pathEnd := strings.IndexAny(requestURI, "?#")
	var path, rest string
	if pathEnd == -1 {
		path = requestURI
	} else {
		path = requestURI[:pathEnd]
		rest = requestURI[pathEnd:]
	}

	if !strings.HasSuffix(path, "/") {
		http.Redirect(writer, request, path+"/"+rest, http.StatusSeeOther)
		return true
	}
	return false
}

// RedirectNoDir returns true and redirects the user to a version of the URL
// without a trailing slash, if and only if the request URL has a trailing
// slash.
func RedirectNoDir(writer http.ResponseWriter, request *http.Request) bool {
	requestURI := request.RequestURI

	pathEnd := strings.IndexAny(requestURI, "?#")
	var path, rest string
	if pathEnd == -1 {
		path = requestURI
	} else {
		path = requestURI[:pathEnd]
		rest = requestURI[pathEnd:]
	}

	if strings.HasSuffix(path, "/") {
		http.Redirect(writer, request, strings.TrimSuffix(path, "/")+rest, http.StatusSeeOther)
		return true
	}
	return false
}

// RedirectUnconditionally unconditionally redirects the user back to the
// current page while preserving query parameters.
func RedirectUnconditionally(writer http.ResponseWriter, request *http.Request) {
	requestURI := request.RequestURI

	pathEnd := strings.IndexAny(requestURI, "?#")
	var path, rest string
	if pathEnd == -1 {
		path = requestURI
	} else {
		path = requestURI[:pathEnd]
		rest = requestURI[pathEnd:]
	}

	http.Redirect(writer, request, path+rest, http.StatusSeeOther)
}

// SegmentsToURL joins URL segments to the path component of a URL.
// Each segment is escaped properly first.
func SegmentsToURL(segments []string) string {
	for i, segment := range segments {
		segments[i] = url.PathEscape(segment)
	}
	return strings.Join(segments, "/")
}

// AnyContain returns true if and only if ss contains a string that contains c.
func AnyContain(ss []string, c string) bool {
	for _, s := range ss {
		if strings.Contains(s, c) {
			return true
		}
	}
	return false
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package oldgit

import (
	"bytes"
	"fmt"
	"strings"
	"time"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// FmtCommitPatch formats a commit object as if it were output by
// git-format-patch.
func FmtCommitPatch(commit *object.Commit) (final string, err error) {
	var patch *object.Patch
	var buf bytes.Buffer
	var author object.Signature
	var date string
	var commitTitle, commitDetails string

	if _, patch, err = CommitToPatch(commit); err != nil {
		return "", err
	}

	author = commit.Author
	date = author.When.Format(time.RFC1123Z)

	commitTitle, commitDetails, _ = strings.Cut(commit.Message, "\n")

	// This date is hardcoded in Git.
	fmt.Fprintf(&buf, "From %s Mon Sep 17 00:00:00 2001\n", commit.Hash)
	fmt.Fprintf(&buf, "From: %s <%s>\n", author.Name, author.Email)
	fmt.Fprintf(&buf, "Date: %s\n", date)
	fmt.Fprintf(&buf, "Subject: [PATCH] %s\n\n", commitTitle)

	if commitDetails != "" {
		commitDetails1, commitDetails2, _ := strings.Cut(commitDetails, "\n")
		if strings.TrimSpace(commitDetails1) == "" {
			commitDetails = commitDetails2
		}
		buf.WriteString(commitDetails)
		buf.WriteString("\n")
	}
	buf.WriteString("---\n")
	fmt.Fprint(&buf, patch.Stats().String())
	fmt.Fprintln(&buf)

	buf.WriteString(patch.String())

	fmt.Fprintf(&buf, "\n-- \n2.48.1\n")

	return buf.String(), nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package oldgit provides deprecated functions that depend on go-git.
package oldgit
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package oldgit

import (
	"errors"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// CommitToPatch creates an [object.Patch] from the first parent of a given
// [object.Commit].
//
// TODO: This function should be deprecated as it only diffs with the first
// parent and does not correctly handle merge commits.
func CommitToPatch(commit *object.Commit) (parentCommitHash plumbing.Hash, patch *object.Patch, err error) {
	var parentCommit *object.Commit
	var commitTree *object.Tree

	parentCommit, err = commit.Parent(0)
	switch {
	case errors.Is(err, object.ErrParentNotFound):
		if commitTree, err = commit.Tree(); err != nil {
			return
		}
		if patch, err = NullTree.Patch(commitTree); err != nil {
			return
		}
	case err != nil:
		return
	default:
		parentCommitHash = parentCommit.Hash
		if patch, err = parentCommit.Patch(commit); err != nil {
			return
		}
	}
	return
}

var NullTree object.Tree //nolint:gochecknoglobals
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package render

import (
	"bytes"
	"html/template"

	chromaHTML "github.com/alecthomas/chroma/v2/formatters/html"
	chromaLexers "github.com/alecthomas/chroma/v2/lexers"
	chromaStyles "github.com/alecthomas/chroma/v2/styles"
)

// Highlight returns syntax-highlighted HTML for the given file contents,
// selecting a Chroma lexer based on the filename.
func Highlight(filename, content string) template.HTML {
	lexer := chromaLexers.Match(filename)
	if lexer == nil {
		lexer = chromaLexers.Fallback
	}

	iterator, err := lexer.Tokenise(nil, content)
	if err != nil {
		return template.HTML("<pre>Error tokenizing file: " + err.Error() + "</pre>") //#nosec G203`
	}

	var buf bytes.Buffer
	style := chromaStyles.Get("autumn")
	formatter := chromaHTML.New(
		chromaHTML.WithClasses(true),
		chromaHTML.TabWidth(8),
	)

	if err := formatter.Format(&buf, style, iterator); err != nil {
		return template.HTML("<pre>Error formatting file: " + err.Error() + "</pre>") //#nosec G203
	}

	return template.HTML(buf.Bytes()) //#nosec G203
}
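
// A minimal usage sketch; the filename and content are illustrative. Note
// that the formatter is created with WithClasses(true), so the output relies
// on a Chroma stylesheet being served separately.
//
//	html := render.Highlight("main.go", "package main\n")
//	_ = html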
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package render

import (
	"html"
	"html/template"
)

// EscapeHTML just escapes a string and wraps it in [template.HTML].
func EscapeHTML(s string) template.HTML {
	return template.HTML(html.EscapeString(s)) //#nosec G203
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package render

import (
	"bytes"
	"html"
	"html/template"
	"strings"

	"github.com/microcosm-cc/bluemonday"
	"github.com/niklasfasching/go-org/org"
	"github.com/yuin/goldmark"
	"github.com/yuin/goldmark/extension"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

var markdownConverter = goldmark.New(goldmark.WithExtensions(extension.GFM)) //nolint:gochecknoglobals

// Readme renders and sanitizes README content from a byte slice and filename,
// returning a title to display and the rendered HTML.
func Readme(data []byte, filename string) (string, template.HTML) {
	switch strings.ToLower(filename) {
	case "readme":
		return "README", template.HTML("<pre>" + html.EscapeString(misc.BytesToString(data)) + "</pre>") //#nosec G203
	case "readme.md":
		var buf bytes.Buffer
		if err := markdownConverter.Convert(data, &buf); err != nil {
			return "Error fetching README", EscapeHTML("Unable to render README: " + err.Error())
		}
		return "README.md", template.HTML(bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes())) //#nosec G203
	case "readme.org":
		htmlStr, err := org.New().Parse(strings.NewReader(misc.BytesToString(data)), filename).Write(org.NewHTMLWriter())
		if err != nil {
			return "Error fetching README", EscapeHTML("Unable to render README: " + err.Error())
		}
		return "README.org", template.HTML(bluemonday.UGCPolicy().Sanitize(htmlStr)) //#nosec G203
	default:
		return filename, template.HTML("<pre>" + html.EscapeString(misc.BytesToString(data)) + "</pre>") //#nosec G203
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package render provides functions to render code and READMEs.
package render
linters:
  enable-all: true
  disable:
    - perfsprint
    - wsl
    - varnamelen
    - nlreturn
    - exhaustruct
    - wrapcheck
    - lll
    - exhaustive
    - intrange
    - godox
    - nestif
    - err113
    - staticcheck
    - errorlint
    - cyclop
    - nonamedreturns
    - funlen
    - gochecknoglobals

issues:
  max-issues-per-linter: 0
  max-same-issues: 0
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>

package scfg

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

// This limits the max block nesting depth to prevent stack overflows.
const maxNestingDepth = 1000

// Load loads a configuration file.
func Load(path string) (Block, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return Read(f)
}

// Read parses a configuration file from an io.Reader.
func Read(r io.Reader) (Block, error) {
	scanner := bufio.NewScanner(r)

	dec := decoder{scanner: scanner}
	block, closingBrace, err := dec.readBlock()
	if err != nil {
		return nil, err
	} else if closingBrace {
		return nil, fmt.Errorf("line %v: unexpected '}'", dec.lineno)
	}

	return block, scanner.Err()
}

type decoder struct {
	scanner    *bufio.Scanner
	lineno     int
	blockDepth int
}

// readBlock reads a block. closingBrace is true if parsing stopped on '}'
// (otherwise, it stopped on Scanner.Scan).
func (dec *decoder) readBlock() (block Block, closingBrace bool, err error) {
	dec.blockDepth++
	defer func() {
		dec.blockDepth--
	}()

	if dec.blockDepth >= maxNestingDepth {
		return nil, false, fmt.Errorf("exceeded max block depth")
	}

	for dec.scanner.Scan() {
		dec.lineno++

		l := dec.scanner.Text()
		words, err := splitWords(l)
		if err != nil {
			return nil, false, fmt.Errorf("line %v: %v", dec.lineno, err)
		} else if len(words) == 0 {
			continue
		}

		if len(words) == 1 && l[len(l)-1] == '}' {
			closingBrace = true
			break
		}

		var d *Directive
		if words[len(words)-1] == "{" && l[len(l)-1] == '{' {
			words = words[:len(words)-1]

			var name string
			params := words
			if len(words) > 0 {
				name, params = words[0], words[1:]
			}

			startLineno := dec.lineno
			childBlock, childClosingBrace, err := dec.readBlock()
			if err != nil {
				return nil, false, err
			} else if !childClosingBrace {
				return nil, false, fmt.Errorf("line %v: unterminated block", startLineno)
			}

			// Allows callers to tell apart "no block" and "empty block"
			if childBlock == nil {
				childBlock = Block{}
			}

			d = &Directive{Name: name, Params: params, Children: childBlock, lineno: dec.lineno}
		} else {
			d = &Directive{Name: words[0], Params: words[1:], lineno: dec.lineno}
		}
		block = append(block, d)
	}

	return block, closingBrace, nil
}

func splitWords(l string) ([]string, error) {
	var (
		words   []string
		sb      strings.Builder
		escape  bool
		quote   rune
		wantWSP bool
	)
	for _, ch := range l {
		switch {
		case escape:
			sb.WriteRune(ch)
			escape = false
		case wantWSP && (ch != ' ' && ch != '\t'):
			return words, fmt.Errorf("atom not allowed after quoted string")
		case ch == '\\':
			escape = true
		case quote != 0 && ch == quote:
			quote = 0
			wantWSP = true
			if sb.Len() == 0 {
				words = append(words, "")
			}
		case quote == 0 && len(words) == 0 && sb.Len() == 0 && ch == '#':
			return nil, nil
		case quote == 0 && (ch == '\'' || ch == '"'):
			if sb.Len() > 0 {
				return words, fmt.Errorf("quoted string not allowed after atom")
			}
			quote = ch
		case quote == 0 && (ch == ' ' || ch == '\t'):
			if sb.Len() > 0 {
				words = append(words, sb.String())
			}
			sb.Reset()
			wantWSP = false
		default:
			sb.WriteRune(ch)
		}
	}
	if quote != 0 {
		return words, fmt.Errorf("unterminated quoted string")
	}
	if sb.Len() > 0 {
		words = append(words, sb.String())
	}
	return words, nil
}
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>

// Package scfg parses and formats configuration files.
// Note that this fork of scfg behaves differently from upstream scfg.
package scfg

import (
	"fmt"
)

// Block is a list of directives.
type Block []*Directive

// GetAll returns a list of directives with the provided name.
func (blk Block) GetAll(name string) []*Directive {
	l := make([]*Directive, 0, len(blk))
	for _, child := range blk {
		if child.Name == name {
			l = append(l, child)
		}
	}
	return l
}

// Get returns the first directive with the provided name.
func (blk Block) Get(name string) *Directive {
	for _, child := range blk {
		if child.Name == name {
			return child
		}
	}
	return nil
}

// Directive is a configuration directive.
type Directive struct {
	Name   string
	Params []string

	Children Block

	lineno int
}

// ParseParams extracts parameters from the directive. It errors out if the
// user hasn't provided enough parameters.
func (d *Directive) ParseParams(params ...*string) error {
	if len(d.Params) < len(params) {
		return fmt.Errorf("directive %q: want %v params, got %v", d.Name, len(params), len(d.Params))
	}
	for i, ptr := range params {
		if ptr == nil {
			continue
		}
		*ptr = d.Params[i]
	}
	return nil
}
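
// A minimal usage sketch: extracting the parameters of a directive such as
// "listen 127.0.0.1 8080" (the directive name and values are illustrative).
//
//	dir := blk.Get("listen")
//	var addr, port string
//	if err := dir.ParseParams(&addr, &port); err != nil {
//		// handle error
//	}
//	// addr == "127.0.0.1", port == "8080"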
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>

package scfg

import (
	"fmt"
	"reflect"
	"strings"
	"sync"
)

// structInfo contains scfg metadata for structs.
type structInfo struct {
	param    int            // index of field storing parameters
	children map[string]int // indices of fields storing child directives
}

var (
	structCacheMutex sync.Mutex
	structCache      = make(map[reflect.Type]*structInfo)
)

func getStructInfo(t reflect.Type) (*structInfo, error) {
	structCacheMutex.Lock()
	defer structCacheMutex.Unlock()

	if info := structCache[t]; info != nil {
		return info, nil
	}

	info := &structInfo{
		param:    -1,
		children: make(map[string]int),
	}

	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Anonymous {
			return nil, fmt.Errorf("scfg: anonymous struct fields are not supported")
		} else if !f.IsExported() {
			continue
		}

		tag := f.Tag.Get("scfg")
		parts := strings.Split(tag, ",")
		k, options := parts[0], parts[1:]
		if k == "-" {
			continue
		} else if k == "" {
			k = f.Name
		}

		isParam := false
		for _, opt := range options {
			switch opt {
			case "param":
				isParam = true
			default:
				return nil, fmt.Errorf("scfg: invalid option %q in struct tag", opt)
			}
		}

		if isParam {
			if info.param >= 0 {
				return nil, fmt.Errorf("scfg: param option specified multiple times in struct tag in %v", t)
			}
			if parts[0] != "" {
				return nil, fmt.Errorf("scfg: name must be empty when param option is specified in struct tag in %v", t)
			}
			info.param = i
		} else {
			if _, ok := info.children[k]; ok {
				return nil, fmt.Errorf("scfg: key %q specified multiple times in struct tag in %v", k, t)
			}
			info.children[k] = i
		}
	}

	structCache[t] = info
	return info, nil
}
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package scfg

import (
	"encoding"
	"fmt"
	"io"
	"reflect"
	"strconv"
)

// Decoder reads and decodes an scfg document from an input stream.
type Decoder struct {
	r                 io.Reader
	unknownDirectives []*Directive
}

// NewDecoder returns a new decoder which reads from r.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}

// UnknownDirectives returns a slice of all unknown directives encountered
// during Decode.
func (dec *Decoder) UnknownDirectives() []*Directive {
	return dec.unknownDirectives
}

// Decode reads scfg document from the input and stores it in the value pointed
// to by v.
//
// If v is nil or not a pointer, Decode returns an error.
//
// Blocks can be unmarshaled to:
//
//   - Maps. Each directive is unmarshaled into a map entry. The map key must
//     be a string.
//   - Structs. Each directive is unmarshaled into a struct field.
//
// Duplicate directives are not allowed, unless the struct field or map value
// is a slice of values representing a directive: structs or maps.
//
// Directives can be unmarshaled to:
//
//   - Maps. The children block is unmarshaled into the map. Parameters are not
//     allowed.
//   - Structs. The children block is unmarshaled into the struct. Parameters
//     are allowed if one of the struct fields contains the "param" option in
//     its tag.
//   - Slices. Parameters are unmarshaled into the slice. Children blocks are
//     not allowed.
//   - Arrays. Parameters are unmarshaled into the array. The number of
//     parameters must match exactly the length of the array. Children blocks
//     are not allowed.
//   - Strings, booleans, integers, floating-point values, values implementing
//     encoding.TextUnmarshaler. Only a single parameter is allowed and is
//     unmarshaled into the value. Children blocks are not allowed.
//
// The decoding of each struct field can be customized by the format string
// stored under the "scfg" key in the struct field's tag. The tag contains the
// name of the field possibly followed by a comma-separated list of options.
// The name may be empty in order to specify options without overriding the
// default field name. As a special case, if the field name is "-", the field
// is ignored. The "param" option specifies that directive parameters are
// stored in this field (the name must be empty).
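//
// A minimal sketch (the directive names and values are illustrative, not part
// of this package): the document
//
//	listen 127.0.0.1 {
//		port 8080
//	}
//
// can be decoded with
//
//	type Listen struct {
//		Addr string `scfg:",param"`
//		Port int    `scfg:"port"`
//	}
//	var cfg struct {
//		Listen Listen `scfg:"listen"`
//	}
//	err := scfg.NewDecoder(r).Decode(&cfg)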
func (dec *Decoder) Decode(v interface{}) error {
	block, err := Read(dec.r)
	if err != nil {
		return err
	}

	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("scfg: invalid value for unmarshaling")
	}

	return dec.unmarshalBlock(block, rv)
}

func (dec *Decoder) unmarshalBlock(block Block, v reflect.Value) error {
	v = unwrapPointers(v)
	t := v.Type()

	dirsByName := make(map[string][]*Directive, len(block))
	for _, dir := range block {
		dirsByName[dir.Name] = append(dirsByName[dir.Name], dir)
	}

	switch v.Kind() {
	case reflect.Map:
		if t.Key().Kind() != reflect.String {
			return fmt.Errorf("scfg: map key type must be string")
		}
		if v.IsNil() {
			v.Set(reflect.MakeMap(t))
		} else if v.Len() > 0 {
			clearMap(v)
		}

		for name, dirs := range dirsByName {
			mv := reflect.New(t.Elem()).Elem()
			if err := dec.unmarshalDirectiveList(dirs, mv); err != nil {
				return err
			}
			v.SetMapIndex(reflect.ValueOf(name), mv)
		}

	case reflect.Struct:
		si, err := getStructInfo(t)
		if err != nil {
			return err
		}

		seen := make(map[int]bool)

		for name, dirs := range dirsByName {
			fieldIndex, ok := si.children[name]
			if !ok {
				dec.unknownDirectives = append(dec.unknownDirectives, dirs...)
				continue
			}
			fv := v.Field(fieldIndex)
			if err := dec.unmarshalDirectiveList(dirs, fv); err != nil {
				return err
			}
			seen[fieldIndex] = true
		}

		for name, fieldIndex := range si.children {
			if fieldIndex == si.param {
				continue
			}
			if _, ok := seen[fieldIndex]; !ok {
				return fmt.Errorf("scfg: missing required directive %q", name)
			}
		}

	default:
		return fmt.Errorf("scfg: unsupported type for unmarshaling blocks: %v", t)
	}

	return nil
}

func (dec *Decoder) unmarshalDirectiveList(dirs []*Directive, v reflect.Value) error {
	v = unwrapPointers(v)
	t := v.Type()

	if v.Kind() != reflect.Slice || !isDirectiveType(t.Elem()) {
		if len(dirs) > 1 {
			return newUnmarshalDirectiveError(dirs[1], "directive must not be specified more than once")
		}
		return dec.unmarshalDirective(dirs[0], v)
	}

	sv := reflect.MakeSlice(t, len(dirs), len(dirs))
	for i, dir := range dirs {
		if err := dec.unmarshalDirective(dir, sv.Index(i)); err != nil {
			return err
		}
	}
	v.Set(sv)
	return nil
}

// isDirectiveType checks whether a type can only be unmarshaled as a
// directive, not as a parameter. Accepting too many types here would result in
// ambiguities, see:
// https://lists.sr.ht/~emersion/public-inbox/%3C20230629132458.152205-1-contact%40emersion.fr%3E#%3Ch4Y2peS_YBqY3ar4XlmPDPiNBFpYGns3EBYUx3_6zWEhV2o8_-fBQveRujGADWYhVVCucHBEryFGoPtpC3d3mQ-x10pWnFogfprbQTSvtxc=@emersion.fr%3E
func isDirectiveType(t reflect.Type) bool {
	for t.Kind() == reflect.Ptr {
		t = t.Elem()
	}

	textUnmarshalerType := reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
	if reflect.PtrTo(t).Implements(textUnmarshalerType) {
		return false
	}

	switch t.Kind() {
	case reflect.Struct, reflect.Map:
		return true
	default:
		return false
	}
}

func (dec *Decoder) unmarshalDirective(dir *Directive, v reflect.Value) error {
	v = unwrapPointers(v)
	t := v.Type()

	if v.CanAddr() {
		if _, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok {
			if len(dir.Children) != 0 {
				return newUnmarshalDirectiveError(dir, "directive requires zero children")
			}
			return unmarshalParamList(dir, v)
		}
	}

	switch v.Kind() {
	case reflect.Map:
		if len(dir.Params) > 0 {
			return newUnmarshalDirectiveError(dir, "directive requires zero parameters")
		}
		if err := dec.unmarshalBlock(dir.Children, v); err != nil {
			return err
		}
	case reflect.Struct:
		si, err := getStructInfo(t)
		if err != nil {
			return err
		}

		if si.param >= 0 {
			if err := unmarshalParamList(dir, v.Field(si.param)); err != nil {
				return err
			}
		} else {
			if len(dir.Params) > 0 {
				return newUnmarshalDirectiveError(dir, "directive requires zero parameters")
			}
		}

		if err := dec.unmarshalBlock(dir.Children, v); err != nil {
			return err
		}
	default:
		if len(dir.Children) != 0 {
			return newUnmarshalDirectiveError(dir, "directive requires zero children")
		}
		if err := unmarshalParamList(dir, v); err != nil {
			return err
		}
	}
	return nil
}

func unmarshalParamList(dir *Directive, v reflect.Value) error {
	switch v.Kind() {
	case reflect.Slice:
		t := v.Type()
		sv := reflect.MakeSlice(t, len(dir.Params), len(dir.Params))
		for i, param := range dir.Params {
			if err := unmarshalParam(param, sv.Index(i)); err != nil {
				return newUnmarshalParamError(dir, i, err)
			}
		}
		v.Set(sv)
	case reflect.Array:
		if len(dir.Params) != v.Len() {
			return newUnmarshalDirectiveError(dir, fmt.Sprintf("directive requires exactly %v parameters", v.Len()))
		}
		for i, param := range dir.Params {
			if err := unmarshalParam(param, v.Index(i)); err != nil {
				return newUnmarshalParamError(dir, i, err)
			}
		}
	default:
		if len(dir.Params) != 1 {
			return newUnmarshalDirectiveError(dir, "directive requires exactly one parameter")
		}
		if err := unmarshalParam(dir.Params[0], v); err != nil {
			return newUnmarshalParamError(dir, 0, err)
		}
	}

	return nil
}

func unmarshalParam(param string, v reflect.Value) error {
	v = unwrapPointers(v)
	t := v.Type()

	// TODO: improve our logic following:
	// https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/encoding/json/decode.go;drc=b9b8cecbfc72168ca03ad586cc2ed52b0e8db409;l=421
	if v.CanAddr() {
		if v, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok {
			return v.UnmarshalText([]byte(param))
		}
	}

	switch v.Kind() {
	case reflect.String:
		v.Set(reflect.ValueOf(param))
	case reflect.Bool:
		switch param {
		case "true":
			v.Set(reflect.ValueOf(true))
		case "false":
			v.Set(reflect.ValueOf(false))
		default:
			return fmt.Errorf("invalid bool parameter %q", param)
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		i, err := strconv.ParseInt(param, 10, t.Bits())
		if err != nil {
			return fmt.Errorf("invalid %v parameter: %v", t, err)
		}
		v.Set(reflect.ValueOf(i).Convert(t))
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		u, err := strconv.ParseUint(param, 10, t.Bits())
		if err != nil {
			return fmt.Errorf("invalid %v parameter: %v", t, err)
		}
		v.Set(reflect.ValueOf(u).Convert(t))
	case reflect.Float32, reflect.Float64:
		f, err := strconv.ParseFloat(param, t.Bits())
		if err != nil {
			return fmt.Errorf("invalid %v parameter: %v", t, err)
		}
		v.Set(reflect.ValueOf(f).Convert(t))
	default:
		return fmt.Errorf("unsupported type for unmarshaling parameter: %v", t)
	}

	return nil
}

func unwrapPointers(v reflect.Value) reflect.Value {
	for v.Kind() == reflect.Ptr {
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return v
}

func clearMap(v reflect.Value) {
	for _, k := range v.MapKeys() {
		v.SetMapIndex(k, reflect.Value{})
	}
}

type unmarshalDirectiveError struct {
	lineno int
	name   string
	msg    string
}

func newUnmarshalDirectiveError(dir *Directive, msg string) *unmarshalDirectiveError {
	return &unmarshalDirectiveError{
		name:   dir.Name,
		lineno: dir.lineno,
		msg:    msg,
	}
}

func (err *unmarshalDirectiveError) Error() string {
	return fmt.Sprintf("line %v, directive %q: %v", err.lineno, err.name, err.msg)
}

type unmarshalParamError struct {
	lineno     int
	directive  string
	paramIndex int
	err        error
}

func newUnmarshalParamError(dir *Directive, paramIndex int, err error) *unmarshalParamError {
	return &unmarshalParamError{
		directive:  dir.Name,
		lineno:     dir.lineno,
		paramIndex: paramIndex,
		err:        err,
	}
}

func (err *unmarshalParamError) Error() string {
	return fmt.Sprintf("line %v, directive %q, parameter %v: %v", err.lineno, err.directive, err.paramIndex+1, err.err)
}
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>

package scfg

import (
	"errors"
	"io"
	"strings"
)

var errDirEmptyName = errors.New("scfg: directive with empty name")

// Write writes a parsed configuration to the provided io.Writer.
func Write(w io.Writer, blk Block) error {
	enc := newEncoder(w)
	err := enc.encodeBlock(blk)
	return err
}

// encoder writes scfg directives to an output stream.
type encoder struct {
	w   io.Writer
	lvl int
	err error
}

// newEncoder returns a new encoder that writes to w.
func newEncoder(w io.Writer) *encoder {
	return &encoder{w: w}
}

func (enc *encoder) push() {
	enc.lvl++
}

func (enc *encoder) pop() {
	enc.lvl--
}

func (enc *encoder) writeIndent() {
	for i := 0; i < enc.lvl; i++ {
		enc.write([]byte("\t"))
	}
}

func (enc *encoder) write(p []byte) {
	if enc.err != nil {
		return
	}
	_, enc.err = enc.w.Write(p)
}

func (enc *encoder) encodeBlock(blk Block) error {
	for _, dir := range blk {
		if err := enc.encodeDir(*dir); err != nil {
			return err
		}
	}
	return enc.err
}

func (enc *encoder) encodeDir(dir Directive) error {
	if enc.err != nil {
		return enc.err
	}

	if dir.Name == "" {
		enc.err = errDirEmptyName
		return enc.err
	}

	enc.writeIndent()
	enc.write([]byte(maybeQuote(dir.Name)))
	for _, p := range dir.Params {
		enc.write([]byte(" "))
		enc.write([]byte(maybeQuote(p)))
	}

	if len(dir.Children) > 0 {
		enc.write([]byte(" {\n"))
		enc.push()
		if err := enc.encodeBlock(dir.Children); err != nil {
			return err
		}
		enc.pop()

		enc.writeIndent()
		enc.write([]byte("}"))
	}
	enc.write([]byte("\n"))

	return enc.err
}

const specialChars = "\"\\\r\n'{} \t"

func maybeQuote(s string) string {
	if s == "" || strings.ContainsAny(s, specialChars) {
		var sb strings.Builder
		sb.WriteByte('"')
		for _, ch := range s {
			if strings.ContainsRune(`"\`, ch) {
				sb.WriteByte('\\')
			}
			sb.WriteRune(ch)
		}
		sb.WriteByte('"')
		return sb.String()
	}
	return s
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"context"

	"github.com/jackc/pgx/v5/pgtype"
)

// getRepoInfo returns the filesystem path and direct access permission for a
// given repo and a provided ssh public key.
//
// TODO: Revamp.
func (s *Server) getRepoInfo(ctx context.Context, groupPath []string, repoName, sshPubkey string) (repoID int, fsPath string, access bool, contribReq, userType string, userID int, err error) {
	err = s.database.QueryRow(ctx, `
WITH RECURSIVE group_path_cte AS (
	-- Start: match the first name in the path where parent_group IS NULL
	SELECT
		id,
		parent_group,
		name,
		1 AS depth
	FROM groups
	WHERE name = ($1::text[])[1]
		AND parent_group IS NULL

	UNION ALL

	-- Recurse: join next segment of the path
	SELECT
		g.id,
		g.parent_group,
		g.name,
		group_path_cte.depth + 1
	FROM groups g
	JOIN group_path_cte ON g.parent_group = group_path_cte.id
	WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
		AND group_path_cte.depth + 1 <= cardinality($1::text[])
)
SELECT
	r.id,
	r.filesystem_path,
	CASE WHEN ugr.user_id IS NOT NULL THEN TRUE ELSE FALSE END AS has_role_in_group,
	r.contrib_requirements,
	COALESCE(u.type, ''),
	COALESCE(u.id, 0)
FROM group_path_cte g
JOIN repos r ON r.group_id = g.id
LEFT JOIN ssh_public_keys s ON s.key_string = $3
LEFT JOIN users u ON u.id = s.user_id
LEFT JOIN user_group_roles ugr ON ugr.group_id = g.id AND ugr.user_id = u.id
WHERE g.depth = cardinality($1::text[])
	AND r.name = $2
`, pgtype.FlatArray[string](groupPath), repoName, sshPubkey,
	).Scan(&repoID, &fsPath, &access, &contribReq, &userType, &userID)
	return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"bufio"
	"errors"
	"log/slog"
	"os"

	"go.lindenii.runxiyu.org/forge/internal/database"
	"go.lindenii.runxiyu.org/forge/internal/irc"
	"go.lindenii.runxiyu.org/forge/internal/scfg"
	"go.lindenii.runxiyu.org/forge/forged/internal/database"
	"go.lindenii.runxiyu.org/forge/forged/internal/irc"
	"go.lindenii.runxiyu.org/forge/forged/internal/scfg"
)

// Config is the top-level forge configuration, decoded from the scfg
// configuration file.
type Config struct {
	HTTP struct {
		Net          string `scfg:"net"`
		Addr         string `scfg:"addr"`
		CookieExpiry int    `scfg:"cookie_expiry"`
		Root         string `scfg:"root"`
		ReadTimeout  uint32 `scfg:"read_timeout"`
		WriteTimeout uint32 `scfg:"write_timeout"`
		IdleTimeout  uint32 `scfg:"idle_timeout"`
		ReverseProxy bool   `scfg:"reverse_proxy"`
	} `scfg:"http"`
	Hooks struct {
		Socket string `scfg:"socket"`
		Execs  string `scfg:"execs"`
	} `scfg:"hooks"`
	LMTP struct {
		Socket       string `scfg:"socket"`
		Domain       string `scfg:"domain"`
		MaxSize      int64  `scfg:"max_size"`
		WriteTimeout uint32 `scfg:"write_timeout"`
		ReadTimeout  uint32 `scfg:"read_timeout"`
	} `scfg:"lmtp"`
	Git struct {
		RepoDir    string `scfg:"repo_dir"`
		Socket     string `scfg:"socket"`
		DaemonPath string `scfg:"daemon_path"`
	} `scfg:"git"`
	SSH struct {
		Net  string `scfg:"net"`
		Addr string `scfg:"addr"`
		Key  string `scfg:"key"`
		Root string `scfg:"root"`
	} `scfg:"ssh"`
	IRC     irc.Config `scfg:"irc"`
	General struct {
		Title string `scfg:"title"`
	} `scfg:"general"`
	DB struct {
		Type string `scfg:"type"`
		Conn string `scfg:"conn"`
	} `scfg:"db"`
}

// loadConfig loads a configuration file from the specified path and unmarshals
// it into the server's config struct. This may race with concurrent reads of
// the config; additional synchronization is necessary if the configuration is
// to be made reloadable.
func (s *Server) loadConfig(path string) (err error) {
	var configFile *os.File
	if configFile, err = os.Open(path); err != nil {
		return err
	}
	defer configFile.Close()

	decoder := scfg.NewDecoder(bufio.NewReader(configFile))
	if err = decoder.Decode(&s.config); err != nil {
		return err
	}
	for _, u := range decoder.UnknownDirectives() {
		slog.Warn("unknown configuration directive", "directive", u)
	}

	if s.config.DB.Type != "postgres" {
		return errors.New("unsupported database type")
	}

	if s.database, err = database.Open(s.config.DB.Conn); err != nil {
		return err
	}

	s.globalData["forge_title"] = s.config.General.Title

	return nil
}
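
// A minimal sketch of what a matching configuration file could look like;
// every value is an illustrative placeholder, and an actual file must define
// all of the blocks above (http, hooks, lmtp, git, ssh, irc, general, db),
// since the decoder treats missing directives as errors.
//
//	http {
//		net tcp
//		addr :8080
//		cookie_expiry 604800
//		root https://forge.example.org
//		read_timeout 120
//		write_timeout 1800
//		idle_timeout 120
//		reverse_proxy true
//	}
//	db {
//		type postgres
//		conn postgresql:///lindenii-forge?host=/var/run/postgresql
//	}
//	general {
//		title "Example Forge"
//	}
//	# ... hooks, lmtp, git, ssh, and irc blocks omitted for brevity.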
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// TODO: All database handling logic in all request handlers must be revamped.
// We must ensure that each request has all logic in one transaction (subject
// to exceptions if appropriate) so they get a consistent view of the database
// at a single point. A failure to do so may cause things as serious as
// privilege escalation.

// queryNameDesc is a helper function that executes a query and returns a
// list of nameDesc results. The query must return two string arguments, i.e. a
// name and a description.
func (s *Server) queryNameDesc(ctx context.Context, query string, args ...any) (result []nameDesc, err error) {
	var rows pgx.Rows

	if rows, err = s.database.Query(ctx, query, args...); err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		var name, description string
		if err = rows.Scan(&name, &description); err != nil {
			return nil, err
		}
		result = append(result, nameDesc{name, description})
	}
	return result, rows.Err()
}

// nameDesc holds a name and a description.
type nameDesc struct {
	Name        string
	Description string
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"bufio"
	"context"
	"errors"
	"io"
	"net/http"
	"net/url"
	"strings"

	"github.com/jackc/pgx/v5"
)

// fedauth checks whether a user's SSH public key matches the remote username
// they claim to have on the service. If so, the association is recorded.
func (s *Server) fedauth(ctx context.Context, userID int, service, remoteUsername, pubkey string) (bool, error) {
	var err error

	matched := false
	usernameEscaped := url.PathEscape(remoteUsername)

	var req *http.Request
	switch service {
	// TODO: Services should be configurable by the instance administrator
	// and should not be hardcoded in the source code.
	case "sr.ht":
		req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://meta.sr.ht/~"+usernameEscaped+".keys", nil)
	case "github":
		req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://github.com/"+usernameEscaped+".keys", nil)
	case "codeberg":
		req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://codeberg.org/"+usernameEscaped+".keys", nil)
	case "tangled":
		req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://tangled.sh/keys/"+usernameEscaped, nil)
		// TODO: Don't rely on one webview
	default:
		return false, errors.New("unknown federated service")
	}
	if err != nil {
		return false, err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, err
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	buf := bufio.NewReader(resp.Body)

	for {
		line, err := buf.ReadString('\n')
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return false, err
		}

		lineSplit := strings.Split(line, " ")
		if len(lineSplit) < 2 {
			continue
		}
		line = strings.Join(lineSplit[:2], " ")

		if line == pubkey {
			matched = true
			break
		}
	}

	if !matched {
		return false, nil
	}

	var txn pgx.Tx
	if txn, err = s.database.Begin(ctx); err != nil {
		return false, err
	}
	defer func() {
		_ = txn.Rollback(ctx)
	}()
	if _, err = txn.Exec(ctx, `UPDATE users SET type = 'federated' WHERE id = $1 AND type = 'pubkey_only'`, userID); err != nil {
		return false, err
	}
	if _, err = txn.Exec(ctx, `INSERT INTO federated_identities (user_id, service, remote_username) VALUES ($1, $2, $3)`, userID, service, remoteUsername); err != nil {
		return false, err
	}
	if err = txn.Commit(ctx); err != nil {
		return false, err
	}

	return true, nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
//
//go:build linux

package unsorted

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/jackc/pgx/v5"
	"go.lindenii.runxiyu.org/forge/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

var (
	errGetFD    = errors.New("unable to get file descriptor")
	errGetUcred = errors.New("failed getsockopt")
)

// hooksHandler handles a connection from hookc via the
// unix socket.
func (s *Server) hooksHandler(conn net.Conn) {
	var ctx context.Context
	var cancel context.CancelFunc
	var ucred *syscall.Ucred
	var err error
	var cookie []byte
	var packPass packPass
	var sshStderr io.Writer
	var hookRet byte

	defer conn.Close()
	ctx, cancel = context.WithCancel(context.Background())
	defer cancel()

	// There aren't reasonable cases where someone would run this as
	// another user.
	if ucred, err = getUcred(conn); err != nil {
		if _, err = conn.Write([]byte{1}); err != nil {
			return
		}
		writeRedError(conn, "\nUnable to get peer credentials: %v", err)
		return
	}
	uint32uid := uint32(os.Getuid()) //#nosec G115
	if ucred.Uid != uint32uid {
		if _, err = conn.Write([]byte{1}); err != nil {
			return
		}
		writeRedError(conn, "\nUID mismatch")
		return
	}

	cookie = make([]byte, 64)
	if _, err = conn.Read(cookie); err != nil {
		if _, err = conn.Write([]byte{1}); err != nil {
			return
		}
		writeRedError(conn, "\nFailed to read cookie: %v", err)
		return
	}

	{
		var ok bool
		packPass, ok = s.packPasses.Load(misc.BytesToString(cookie))
		if !ok {
			if _, err = conn.Write([]byte{1}); err != nil {
				return
			}
			writeRedError(conn, "\nInvalid handler cookie")
			return
		}
	}

	sshStderr = packPass.session.Stderr()

	_, _ = sshStderr.Write([]byte{'\n'})

	hookRet = func() byte {
		var argc64 uint64
		if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil {
			writeRedError(sshStderr, "Failed to read argc: %v", err)
			return 1
		}
		var args []string
		for range argc64 {
			var arg bytes.Buffer
			for {
				nextByte := make([]byte, 1)
				n, err := conn.Read(nextByte)
				if err != nil || n != 1 {
					writeRedError(sshStderr, "Failed to read arg: %v", err)
					return 1
				}
				if nextByte[0] == 0 {
					break
				}
				arg.WriteByte(nextByte[0])
			}
			args = append(args, arg.String())
		}

		gitEnv := make(map[string]string)
		for {
			var envLine bytes.Buffer
			for {
				nextByte := make([]byte, 1)
				n, err := conn.Read(nextByte)
				if err != nil || n != 1 {
					writeRedError(sshStderr, "Failed to read environment variable: %v", err)
					return 1
				}
				if nextByte[0] == 0 {
					break
				}
				envLine.WriteByte(nextByte[0])
			}
			if envLine.Len() == 0 {
				break
			}
			kv := envLine.String()
			parts := strings.SplitN(kv, "=", 2)
			if len(parts) < 2 {
				writeRedError(sshStderr, "Invalid environment variable line: %v", kv)
				return 1
			}
			gitEnv[parts[0]] = parts[1]
		}

		var stdin bytes.Buffer
		if _, err = io.Copy(&stdin, conn); err != nil {
			writeRedError(conn, "Failed to read to the stdin buffer: %v", err)
		}

		switch filepath.Base(args[0]) {
		case "pre-receive":
			if packPass.directAccess {
				return 0
			}
			allOK := true
			for {
				var line, oldOID, rest, newIOID, refName string
				var found bool
				var oldHash, newHash plumbing.Hash
				var oldCommit, newCommit *object.Commit
				var pushOptCount int

				pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"])
				if err != nil {
					writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err)
					return 1
				}

				// TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface?
				// Also it'd be nice to be able to combine users or whatever
				if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" {
					if pushOptCount == 0 {
						writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
						return 1
					}
					for pushOptIndex := range pushOptCount {
						pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)]
						if !ok {
							writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex)
							return 1
						}
						if strings.HasPrefix(pushOpt, "fedid=") {
							fedUserID := strings.TrimPrefix(pushOpt, "fedid=")
							service, username, found := strings.Cut(fedUserID, ":")
							if !found {
								writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID)
								return 1
							}

							ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey)
							if err != nil {
								writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err)
								return 1
							}
							if !ok {
								writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID)
								return 1
							}

							break
						}
						if pushOptIndex == pushOptCount-1 {
							writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
							return 1
						}
					}
				}

				line, err = stdin.ReadString('\n')
				if errors.Is(err, io.EOF) {
					break
				} else if err != nil {
					writeRedError(sshStderr, "Failed to read pre-receive line: %v", err)
					return 1
				}
				line = line[:len(line)-1]

				oldOID, rest, found = strings.Cut(line, " ")
				if !found {
					writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
					return 1
				}

				newIOID, refName, found = strings.Cut(rest, " ")
				if !found {
					writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
					return 1
				}

				if strings.HasPrefix(refName, "refs/heads/contrib/") {
					if allZero(oldOID) { // New branch
						fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
						var newMRLocalID int

						if packPass.userID != 0 {
							err = s.database.QueryRow(ctx,
								"INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id",
								packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"),
							).Scan(&newMRLocalID)
						} else {
							err = s.database.QueryRow(ctx,
								"INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id",
								packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"),
							).Scan(&newMRLocalID)
						}
						if err != nil {
							writeRedError(sshStderr, "Error creating merge request: %v", err)
							return 1
						}
						mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID)
						fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset)

						s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL)
					} else { // Existing contrib branch
						var existingMRUser int
						var isAncestor bool

						err = s.database.QueryRow(ctx,
							"SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2",
							strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID,
						).Scan(&existingMRUser)
						if err != nil {
							if errors.Is(err, pgx.ErrNoRows) {
								writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err)
							} else {
								writeRedError(sshStderr, "Error querying for existing merge request: %v", err)
							}
							return 1
						}
						if existingMRUser == 0 {
							allOK = false
							fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)")
							continue
						}

						if existingMRUser != packPass.userID {
							allOK = false
							fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to another user's MR)")
							continue
						}

						oldHash = plumbing.NewHash(oldOID)

						if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil {
							writeRedError(sshStderr, "Daemon failed to get old commit: %v", err)
							return 1
						}

						// Potential BUG: it's not clear that new_commit is guaranteed to
						// be resolvable here, since the pushed objects haven't been merged
						// into the main repo's object store yet. It seems to work in
						// practice, and there's no obvious reason for it to fail only
						// intermittently.
						newHash = plumbing.NewHash(newIOID)
						if newCommit, err = packPass.repo.CommitObject(newHash); err != nil {
							writeRedError(sshStderr, "Daemon failed to get new commit: %v", err)
							return 1
						}

						if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil {
							writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err)
							return 1
						}

						if !isAncestor {
							// TODO: Create MR snapshot ref instead
							allOK = false
							fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)")
							continue
						}

						fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
					}
				} else { // Non-contrib branch
					allOK = false
					fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)")
				}
			}

			fmt.Fprintln(sshStderr)
			if allOK {
				fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)")
				return 0
			}
			fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)")
			return 1
		default:
			fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset)
			return 1
		}
	}()

	fmt.Fprintln(sshStderr)

	_, _ = conn.Write([]byte{hookRet})
}
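
// The wire format consumed above, summarized as an illustrative sketch of what
// a hookc-like client would send (this mirrors the reads in hooksHandler and
// is not an authoritative definition of hookc itself): the 64-byte cookie,
// argc as a native-endian uint64, each argument as a NUL-terminated string,
// the environment as NUL-terminated KEY=VALUE entries ended by an empty entry,
// then the hook's standard input until EOF; the daemon replies with a single
// status byte.
//
//	_, _ = conn.Write(cookie)                                      // 64 bytes
//	_ = binary.Write(conn, binary.NativeEndian, uint64(len(args))) // argc
//	for _, a := range args {
//		_, _ = conn.Write(append([]byte(a), 0))
//	}
//	for k, v := range env {
//		_, _ = conn.Write(append([]byte(k+"="+v), 0))
//	}
//	_, _ = conn.Write([]byte{0}) // empty entry terminates the environment
//	_, _ = io.Copy(conn, os.Stdin)
//	// half-close the write side, then read the one-byte status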

// serveGitHooks handles connections on the specified network listener and
// treats each incoming connection as one from a git hook handler, spawning a
// handler goroutine per connection. The listener must be a SOCK_STREAM UNIX
// domain socket. The function itself blocks.
func (s *Server) serveGitHooks(listener net.Listener) error {
	for {
		conn, err := listener.Accept()
		if err != nil {
			return err
		}
		go s.hooksHandler(conn)
	}
}
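
// Illustrative wiring sketch for serveGitHooks (the socket path below is a
// hypothetical example, not a real configuration value): create a SOCK_STREAM
// UNIX domain socket and hand it to serveGitHooks in its own goroutine.
//
//	listener, err := net.Listen("unix", "/run/lindenii/forge/hooks.sock")
//	if err != nil {
//		panic(err)
//	}
//	go func() { panic(s.serveGitHooks(listener)) }()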

// getUcred fetches connection credentials as a [syscall.Ucred] from a given
// [net.Conn]. It panics when conn is not a [net.UnixConn].
func getUcred(conn net.Conn) (ucred *syscall.Ucred, err error) {
	unixConn := conn.(*net.UnixConn)
	var unixConnFD *os.File

	if unixConnFD, err = unixConn.File(); err != nil {
		return nil, errGetFD
	}
	defer unixConnFD.Close()

	if ucred, err = syscall.GetsockoptUcred(int(unixConnFD.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED); err != nil {
		return nil, errGetUcred
	}
	return ucred, nil
}

// allZero returns true if all runes in a given string are '0'. The comparison
// is not constant time and must not be used in contexts where time-based side
// channel attacks are a concern.
func allZero(s string) bool {
	for _, r := range s {
		if r != '0' {
			return false
		}
	}
	return true
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
//
//go:build !linux

package unsorted

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"net"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/jackc/pgx/v5"
	"go.lindenii.runxiyu.org/forge/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

// hooksHandler handles a connection from hookc via the
// unix socket.
func (s *Server) hooksHandler(conn net.Conn) {
	var ctx context.Context
	var cancel context.CancelFunc
	var err error
	var cookie []byte
	var packPass packPass
	var sshStderr io.Writer
	var hookRet byte

	defer conn.Close()
	ctx, cancel = context.WithCancel(context.Background())
	defer cancel()

	// TODO: ucred-like checks

	cookie = make([]byte, 64)
	if _, err = conn.Read(cookie); err != nil {
		readErr := err
		if _, err = conn.Write([]byte{1}); err != nil {
			return
		}
		writeRedError(conn, "\nFailed to read cookie: %v", readErr)
		return
	}

	{
		var ok bool
		packPass, ok = s.packPasses.Load(misc.BytesToString(cookie))
		if !ok {
			if _, err = conn.Write([]byte{1}); err != nil {
				return
			}
			writeRedError(conn, "\nInvalid handler cookie")
			return
		}
	}

	sshStderr = packPass.session.Stderr()

	_, _ = sshStderr.Write([]byte{'\n'})

	hookRet = func() byte {
		var argc64 uint64
		if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil {
			writeRedError(sshStderr, "Failed to read argc: %v", err)
			return 1
		}
		var args []string
		for range argc64 {
			var arg bytes.Buffer
			for {
				nextByte := make([]byte, 1)
				n, err := conn.Read(nextByte)
				if err != nil || n != 1 {
					writeRedError(sshStderr, "Failed to read arg: %v", err)
					return 1
				}
				if nextByte[0] == 0 {
					break
				}
				arg.WriteByte(nextByte[0])
			}
			args = append(args, arg.String())
		}

		gitEnv := make(map[string]string)
		for {
			var envLine bytes.Buffer
			for {
				nextByte := make([]byte, 1)
				n, err := conn.Read(nextByte)
				if err != nil || n != 1 {
					writeRedError(sshStderr, "Failed to read environment variable: %v", err)
					return 1
				}
				if nextByte[0] == 0 {
					break
				}
				envLine.WriteByte(nextByte[0])
			}
			if envLine.Len() == 0 {
				break
			}
			kv := envLine.String()
			parts := strings.SplitN(kv, "=", 2)
			if len(parts) < 2 {
				writeRedError(sshStderr, "Invalid environment variable line: %v", kv)
				return 1
			}
			gitEnv[parts[0]] = parts[1]
		}

		var stdin bytes.Buffer
		if _, err = io.Copy(&stdin, conn); err != nil {
			writeRedError(conn, "Failed to read to the stdin buffer: %v", err)
		}

		switch filepath.Base(args[0]) {
		case "pre-receive":
			if packPass.directAccess {
				return 0
			}
			allOK := true
			for {
				var line, oldOID, rest, newIOID, refName string
				var found bool
				var oldHash, newHash plumbing.Hash
				var oldCommit, newCommit *object.Commit
				var pushOptCount int

				pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"])
				if err != nil {
					writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err)
					return 1
				}

				// TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface?
				// Also it'd be nice to be able to combine users or whatever
				if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" {
					if pushOptCount == 0 {
						writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
						return 1
					}
					for pushOptIndex := range pushOptCount {
						pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)]
						if !ok {
							writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex)
							return 1
						}
						if strings.HasPrefix(pushOpt, "fedid=") {
							fedUserID := strings.TrimPrefix(pushOpt, "fedid=")
							service, username, found := strings.Cut(fedUserID, ":")
							if !found {
								writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID)
								return 1
							}

							ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey)
							if err != nil {
								writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err)
								return 1
							}
							if !ok {
								writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID)
								return 1
							}

							break
						}
						if pushOptIndex == pushOptCount-1 {
							writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
							return 1
						}
					}
				}

				line, err = stdin.ReadString('\n')
				if errors.Is(err, io.EOF) {
					break
				} else if err != nil {
					writeRedError(sshStderr, "Failed to read pre-receive line: %v", err)
					return 1
				}
				line = line[:len(line)-1]

				oldOID, rest, found = strings.Cut(line, " ")
				if !found {
					writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
					return 1
				}

				newIOID, refName, found = strings.Cut(rest, " ")
				if !found {
					writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
					return 1
				}

				if strings.HasPrefix(refName, "refs/heads/contrib/") {
					if allZero(oldOID) { // New branch
						fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
						var newMRLocalID int

						if packPass.userID != 0 {
							err = s.database.QueryRow(ctx,
								"INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id",
								packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"),
							).Scan(&newMRLocalID)
						} else {
							err = s.database.QueryRow(ctx,
								"INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id",
								packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"),
							).Scan(&newMRLocalID)
						}
						if err != nil {
							writeRedError(sshStderr, "Error creating merge request: %v", err)
							return 1
						}
						mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID)
						fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset)

						s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL)
					} else { // Existing contrib branch
						var existingMRUser int
						var isAncestor bool

						err = s.database.QueryRow(ctx,
							"SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2",
							strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID,
						).Scan(&existingMRUser)
						if err != nil {
							if errors.Is(err, pgx.ErrNoRows) {
								writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err)
							} else {
								writeRedError(sshStderr, "Error querying for existing merge request: %v", err)
							}
							return 1
						}
						if existingMRUser == 0 {
							allOK = false
							fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)")
							continue
						}

						if existingMRUser != packPass.userID {
							allOK = false
							fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to another user's MR)")
							continue
						}

						oldHash = plumbing.NewHash(oldOID)

						if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil {
							writeRedError(sshStderr, "Daemon failed to get old commit: %v", err)
							return 1
						}

						// Potential BUG: it's not clear that new_commit is guaranteed to
						// be resolvable here, since the pushed objects haven't been merged
						// into the main repo's object store yet. It seems to work in
						// practice, and there's no obvious reason for it to fail only
						// intermittently.
						newHash = plumbing.NewHash(newIOID)
						if newCommit, err = packPass.repo.CommitObject(newHash); err != nil {
							writeRedError(sshStderr, "Daemon failed to get new commit: %v", err)
							return 1
						}

						if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil {
							writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err)
							return 1
						}

						if !isAncestor {
							// TODO: Create MR snapshot ref instead
							allOK = false
							fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)")
							continue
						}

						fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
					}
				} else { // Non-contrib branch
					allOK = false
					fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)")
				}
			}

			fmt.Fprintln(sshStderr)
			if allOK {
				fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)")
				return 0
			}
			fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)")
			return 1
		default:
			fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset)
			return 1
		}
	}()

	fmt.Fprintln(sshStderr)

	_, _ = conn.Write([]byte{hookRet})
}

// serveGitHooks handles connections on the specified network listener and
// treats each incoming connection as one from a git hook handler, spawning a
// handler goroutine per connection. The listener must be a SOCK_STREAM UNIX
// domain socket. The function itself blocks.
func (s *Server) serveGitHooks(listener net.Listener) error {
	for {
		conn, err := listener.Accept()
		if err != nil {
			return err
		}
		go s.hooksHandler(conn)
	}
}

// allZero returns true if all runes in a given string are '0'. The comparison
// is not constant time and must not be used in contexts where time-based side
// channel attacks are a concern.
func allZero(s string) bool {
	for _, r := range s {
		if r != '0' {
			return false
		}
	}
	return true
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"github.com/go-git/go-git/v5"
	gitConfig "github.com/go-git/go-git/v5/config"
	gitFmtConfig "github.com/go-git/go-git/v5/plumbing/format/config"
)

// gitInit initializes a bare git repository with the forge-deployed hooks
// directory as the hooksPath.
func (s *Server) gitInit(repoPath string) (err error) {
	var repo *git.Repository
	var gitConf *gitConfig.Config

	if repo, err = git.PlainInit(repoPath, true); err != nil {
		return err
	}

	if gitConf, err = repo.Config(); err != nil {
		return err
	}

	gitConf.Raw.SetOption("core", gitFmtConfig.NoSubsection, "hooksPath", s.config.Hooks.Execs)
	gitConf.Raw.SetOption("receive", gitFmtConfig.NoSubsection, "advertisePushOptions", "true")

	if err = repo.SetConfig(gitConf); err != nil {
		return err
	}

	return nil
}
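
// As a rough sketch, the configuration written by gitInit amounts to setting
// the git config keys core.hooksPath (pointed at the forge-deployed hooks
// directory from s.config.Hooks.Execs) and receive.advertisePushOptions
// (set to "true" so that push options such as fedid=... are accepted) on the
// newly created bare repository. A hypothetical call might look like:
//
//	_ = s.gitInit("/var/lib/lindenii/forge/repos/42.git") // path is an illustrative example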
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"context"
	"errors"
	"io"
	"iter"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/jackc/pgx/v5/pgtype"
)

// openRepo opens a git repository by group and repo name.
//
// TODO: This should be deprecated in favor of doing it in the relevant
// request/router context in the future, as it cannot cover the nuance of
// fields needed.
func (s *Server) openRepo(ctx context.Context, groupPath []string, repoName string) (repo *git.Repository, description string, repoID int, fsPath string, err error) {
	err = s.database.QueryRow(ctx, `
WITH RECURSIVE group_path_cte AS (
	-- Start: match the first name in the path where parent_group IS NULL
	SELECT
		id,
		parent_group,
		name,
		1 AS depth
	FROM groups
	WHERE name = ($1::text[])[1]
		AND parent_group IS NULL

	UNION ALL

	-- Recurse: join next segment of the path
	SELECT
		g.id,
		g.parent_group,
		g.name,
		group_path_cte.depth + 1
	FROM groups g
	JOIN group_path_cte ON g.parent_group = group_path_cte.id
	WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
		AND group_path_cte.depth + 1 <= cardinality($1::text[])
)
SELECT
	r.filesystem_path,
	COALESCE(r.description, ''),
	r.id
FROM group_path_cte g
JOIN repos r ON r.group_id = g.id
WHERE g.depth = cardinality($1::text[])
	AND r.name = $2
	`, pgtype.FlatArray[string](groupPath), repoName).Scan(&fsPath, &description, &repoID)
	if err != nil {
		return
	}

	repo, err = git.PlainOpen(fsPath)
	return
}
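
// Illustrative usage sketch (group and repo names are hypothetical): the
// recursive CTE above resolves the nested group path one segment at a time,
// and the repository attached to the innermost group is then opened.
//
//	repo, desc, repoID, fsPath, err := s.openRepo(ctx, []string{"lindenii", "forge"}, "server")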

// commitIterSeqErr creates an [iter.Seq[*object.Commit]] from an
// [object.CommitIter], and additionally returns a pointer to error.
// The pointer to error is guaranteed to be populated with either nil or the
// error returned by the commit iterator after the returned iterator is
// finished.
func commitIterSeqErr(commitIter object.CommitIter) (iter.Seq[*object.Commit], *error) {
	var err error
	return func(yield func(*object.Commit) bool) {
		for {
			commit, err2 := commitIter.Next()
			if err2 != nil {
				if errors.Is(err2, io.EOF) {
					return
				}
				err = err2
				return
			}
			if !yield(commit) {
				return
			}
		}
	}, &err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"os"
	"os/exec"
	"path"
	"sort"
	"strings"

	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

func writeTree(ctx context.Context, repoPath string, entries []treeEntry) (string, error) {
	var buf bytes.Buffer

	sort.Slice(entries, func(i, j int) bool {
		nameI, nameJ := entries[i].name, entries[j].name

		if nameI == nameJ { // identical names: sort blobs before trees
			return !(entries[i].mode == "40000") && (entries[j].mode == "40000")
		}

		if strings.HasPrefix(nameJ, nameI) && len(nameI) < len(nameJ) {
			return !(entries[i].mode == "40000")
		}

		if strings.HasPrefix(nameI, nameJ) && len(nameJ) < len(nameI) {
			return entries[j].mode == "40000"
		}

		return nameI < nameJ
	})

	for _, e := range entries {
		buf.WriteString(e.mode)
		buf.WriteByte(' ')
		buf.WriteString(e.name)
		buf.WriteByte(0)
		buf.Write(e.sha)
	}

	cmd := exec.CommandContext(ctx, "git", "hash-object", "-w", "-t", "tree", "--stdin")
	cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath)
	cmd.Stdin = &buf

	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return "", err
	}
	return strings.TrimSpace(out.String()), nil
}

func buildTreeRecursive(ctx context.Context, repoPath, baseTree string, updates map[string][]byte) (string, error) {
	treeCache := make(map[string][]treeEntry)

	var walk func(string, string) error
	walk = func(prefix, sha string) error {
		cmd := exec.CommandContext(ctx, "git", "cat-file", "tree", sha)
		cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath)
		var out bytes.Buffer
		cmd.Stdout = &out
		if err := cmd.Run(); err != nil {
			return err
		}
		data := out.Bytes()
		i := 0
		var entries []treeEntry
		for i < len(data) {
			modeEnd := bytes.IndexByte(data[i:], ' ')
			if modeEnd < 0 {
				return errors.New("invalid tree format")
			}
			mode := misc.BytesToString(data[i : i+modeEnd])
			i += modeEnd + 1

			nameEnd := bytes.IndexByte(data[i:], 0)
			if nameEnd < 0 {
				return errors.New("missing null after filename")
			}
			name := misc.BytesToString(data[i : i+nameEnd])
			i += nameEnd + 1

			if i+20 > len(data) {
				return errors.New("unexpected EOF in SHA")
			}
			shaBytes := data[i : i+20]
			i += 20

			entries = append(entries, treeEntry{
				mode: mode,
				name: name,
				sha:  shaBytes,
			})

			if mode == "40000" {
				subPrefix := path.Join(prefix, name)
				if err := walk(subPrefix, hex.EncodeToString(shaBytes)); err != nil {
					return err
				}
			}
		}
		treeCache[prefix] = entries
		return nil
	}

	if err := walk("", baseTree); err != nil {
		return "", err
	}

	for filePath, blobSha := range updates {
		parts := strings.Split(filePath, "/")
		dir := strings.Join(parts[:len(parts)-1], "/")
		name := parts[len(parts)-1]

		entries := treeCache[dir]
		found := false
		for i, e := range entries {
			if e.name == name {
				if blobSha == nil {
					// A nil blob SHA marks the entry for removal (TODO)
					entries = append(entries[:i], entries[i+1:]...)
				} else {
					entries[i].sha = blobSha
				}
				found = true
				break
			}
		}
		if !found && blobSha != nil {
			entries = append(entries, treeEntry{
				mode: "100644",
				name: name,
				sha:  blobSha,
			})
		}
		treeCache[dir] = entries
	}

	built := make(map[string][]byte)
	var build func(string) ([]byte, error)
	build = func(prefix string) ([]byte, error) {
		entries := treeCache[prefix]
		for i, e := range entries {
			if e.mode == "40000" {
				subPrefix := path.Join(prefix, e.name)
				if sha, ok := built[subPrefix]; ok {
					entries[i].sha = sha
					continue
				}
				newShaStr, err := build(subPrefix)
				if err != nil {
					return nil, err
				}
				entries[i].sha = newShaStr
			}
		}
		shaStr, err := writeTree(ctx, repoPath, entries)
		if err != nil {
			return nil, err
		}
		shaBytes, err := hex.DecodeString(shaStr)
		if err != nil {
			return nil, err
		}
		built[prefix] = shaBytes
		return shaBytes, nil
	}

	rootShaBytes, err := build("")
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(rootShaBytes), nil
}

type treeEntry struct {
	mode string // like "100644"
	name string // individual name
	sha  []byte
}
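
// Illustrative usage sketch for buildTreeRecursive (paths and values are
// hypothetical): starting from an existing tree, replace README.md with a new
// blob and delete docs/old.txt, producing the new root tree SHA as hex.
//
//	updates := map[string][]byte{
//		"README.md":    newBlobSHA, // 20-byte binary SHA-1 of an already-written blob
//		"docs/old.txt": nil,        // nil marks the entry for removal
//	}
//	rootTree, err := buildTreeRecursive(ctx, repoPath, baseTreeHex, updates)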
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

// getRefHash returns the hash of a reference given its
// type and name as supplied in URL queries.
func getRefHash(repo *git.Repository, refType, refName string) (refHash plumbing.Hash, err error) {
	var ref *plumbing.Reference
	switch refType {
	case "":
		if ref, err = repo.Head(); err != nil {
			return
		}
		refHash = ref.Hash()
	case "commit":
		refHash = plumbing.NewHash(refName)
	case "branch":
		if ref, err = repo.Reference(plumbing.NewBranchReferenceName(refName), true); err != nil {
			return
		}
		refHash = ref.Hash()
	case "tag":
		if ref, err = repo.Reference(plumbing.NewTagReferenceName(refName), true); err != nil {
			return
		}
		refHash = ref.Hash()
	default:
		panic("Invalid ref type " + refType)
	}
	return
}
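
// Illustrative usage sketch (ref names are hypothetical): an empty ref type
// resolves HEAD, while "branch", "tag", and "commit" resolve the named ref or
// hash.
//
//	headHash, err := getRefHash(repo, "", "")
//	branchHash, err := getRefHash(repo, "branch", "master")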
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"
)

// getUserFromRequest returns the user ID and username associated with the
// session cookie in a given [http.Request].
func (s *Server) getUserFromRequest(request *http.Request) (id int, username string, err error) {
	var sessionCookie *http.Cookie

	if sessionCookie, err = request.Cookie("session"); err != nil {
		return
	}

	err = s.database.QueryRow(
		request.Context(),
		"SELECT user_id, COALESCE(username, '') FROM users u JOIN sessions s ON u.id = s.user_id WHERE s.session_id = $1;",
		sessionCookie.Value,
	).Scan(&id, &username)

	return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"
	"strings"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/storer"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

// httpHandleRepoBranches provides the branches page in repos.
func (s *Server) httpHandleRepoBranches(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
	var repo *git.Repository
	var repoName string
	var groupPath []string
	var err error
	var notes []string
	var branches []string
	var branchesIter storer.ReferenceIter

	repo, repoName, groupPath = params["repo"].(*git.Repository), params["repo_name"].(string), params["group_path"].([]string)

	if strings.Contains(repoName, "\n") || misc.SliceContainsNewlines(groupPath) {
		notes = append(notes, "Path contains newlines; HTTP Git access impossible")
	}

	branchesIter, err = repo.Branches()
	if err == nil {
		_ = branchesIter.ForEach(func(branch *plumbing.Reference) error {
			branches = append(branches, branch.Name().Short())
			return nil
		})
	}
	params["branches"] = branches

	params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, repoName)
	params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, repoName)
	params["notes"] = notes

	s.renderTemplate(writer, "repo_branches", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"errors"
	"net/http"
	"path/filepath"
	"strconv"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleGroupIndex provides index pages for groups, which includes a list
// of its subgroups and repos, as well as a form for group maintainers to
// create repos.
func (s *Server) httpHandleGroupIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var groupPath []string
	var repos []nameDesc
	var subgroups []nameDesc
	var err error
	var groupID int
	var groupDesc string

	groupPath = params["group_path"].([]string)

	// The group itself
	err = s.database.QueryRow(request.Context(), `
		WITH RECURSIVE group_path_cte AS (
			SELECT
				id,
				parent_group,
				name,
				1 AS depth
			FROM groups
			WHERE name = ($1::text[])[1]
				AND parent_group IS NULL

			UNION ALL

			SELECT
				g.id,
				g.parent_group,
				g.name,
				group_path_cte.depth + 1
			FROM groups g
			JOIN group_path_cte ON g.parent_group = group_path_cte.id
			WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
				AND group_path_cte.depth + 1 <= cardinality($1::text[])
		)
		SELECT c.id, COALESCE(g.description, '')
		FROM group_path_cte c
		JOIN groups g ON g.id = c.id
		WHERE c.depth = cardinality($1::text[])
	`,
		pgtype.FlatArray[string](groupPath),
	).Scan(&groupID, &groupDesc)

	if errors.Is(err, pgx.ErrNoRows) {
		web.ErrorPage404(s.templates, writer, params)
		return
	} else if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting group: "+err.Error())
		return
	}

	// ACL
	var count int
	err = s.database.QueryRow(request.Context(), `
		SELECT COUNT(*)
		FROM user_group_roles
		WHERE user_id = $1
			AND group_id = $2
	`, params["user_id"].(int), groupID).Scan(&count)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error checking access: "+err.Error())
		return
	}
	directAccess := (count > 0)

	if request.Method == http.MethodPost {
		if !directAccess {
			web.ErrorPage403(s.templates, writer, params, "You do not have direct access to this group")
			return
		}

		repoName := request.FormValue("repo_name")
		repoDesc := request.FormValue("repo_desc")
		contribReq := request.FormValue("repo_contrib")
		if repoName == "" {
			web.ErrorPage400(s.templates, writer, params, "Repo name is required")
			return
		}

		var newRepoID int
		err := s.database.QueryRow(
			request.Context(),
			`INSERT INTO repos (name, description, group_id, contrib_requirements)
	 VALUES ($1, $2, $3, $4)
	 RETURNING id`,
			repoName,
			repoDesc,
			groupID,
			contribReq,
		).Scan(&newRepoID)
		if err != nil {
			web.ErrorPage500(s.templates, writer, params, "Error creating repo: "+err.Error())
			return
		}

		filePath := filepath.Join(s.config.Git.RepoDir, strconv.Itoa(newRepoID)+".git")

		_, err = s.database.Exec(
			request.Context(),
			`UPDATE repos
	 SET filesystem_path = $1
	 WHERE id = $2`,
			filePath,
			newRepoID,
		)
		if err != nil {
			web.ErrorPage500(s.templates, writer, params, "Error updating repo path: "+err.Error())
			return
		}

		if err = s.gitInit(filePath); err != nil {
			web.ErrorPage500(s.templates, writer, params, "Error initializing repo: "+err.Error())
			return
		}

		misc.RedirectUnconditionally(writer, request)
		return
	}

	// Repos
	var rows pgx.Rows
	rows, err = s.database.Query(request.Context(), `
		SELECT name, COALESCE(description, '')
		FROM repos
		WHERE group_id = $1
	`, groupID)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
		return
	}
	defer rows.Close()

	for rows.Next() {
		var name, description string
		if err = rows.Scan(&name, &description); err != nil {
			web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
			return
		}
		repos = append(repos, nameDesc{name, description})
	}
	if err = rows.Err(); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
		return
	}

	// Subgroups
	rows, err = s.database.Query(request.Context(), `
		SELECT name, COALESCE(description, '')
		FROM groups
		WHERE parent_group = $1
	`, groupID)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
		return
	}
	defer rows.Close()

	for rows.Next() {
		var name, description string
		if err = rows.Scan(&name, &description); err != nil {
			web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
			return
		}
		subgroups = append(subgroups, nameDesc{name, description})
	}
	if err = rows.Err(); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
		return
	}

	params["repos"] = repos
	params["subgroups"] = subgroups
	params["description"] = groupDesc
	params["direct_access"] = directAccess

	s.renderTemplate(writer, "group", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"
	"runtime"

	"github.com/dustin/go-humanize"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleIndex provides the main index page which includes a list of groups
// and some global information such as SSH keys.
func (s *Server) httpHandleIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var err error
	var groups []nameDesc

	groups, err = s.queryNameDesc(request.Context(), "SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL")
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error querying groups: "+err.Error())
		return
	}
	params["groups"] = groups

	// Memory currently allocated
	memstats := runtime.MemStats{} //exhaustruct:ignore
	runtime.ReadMemStats(&memstats)
	params["mem"] = humanize.IBytes(memstats.Alloc)
	s.renderTemplate(writer, "index", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"crypto/rand"
	"encoding/base64"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/alexedwards/argon2id"
	"github.com/jackc/pgx/v5"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleLogin provides the login page for local users.
func (s *Server) httpHandleLogin(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var username, password string
	var userID int
	var passwordHash string
	var err error
	var passwordMatches bool
	var cookieValue string
	var now time.Time
	var expiry time.Time
	var cookie http.Cookie

	if request.Method != http.MethodPost {
		s.renderTemplate(writer, "login", params)
		return
	}

	username = request.PostFormValue("username")
	password = request.PostFormValue("password")

	err = s.database.QueryRow(request.Context(),
		"SELECT id, COALESCE(password, '') FROM users WHERE username = $1",
		username,
	).Scan(&userID, &passwordHash)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			params["login_error"] = "Unknown username"
			s.renderTemplate(writer, "login", params)
			return
		}
		web.ErrorPage500(s.templates, writer, params, "Error querying user information: "+err.Error())
		return
	}
	if passwordHash == "" {
		params["login_error"] = "User has no password"
		s.renderTemplate(writer, "login", params)
		return
	}

	if passwordMatches, err = argon2id.ComparePasswordAndHash(password, passwordHash); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error comparing password and hash: "+err.Error())
		return
	}

	if !passwordMatches {
		params["login_error"] = "Invalid password"
		s.renderTemplate(writer, "login", params)
		return
	}

	if cookieValue, err = randomUrlsafeStr(16); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting random string: "+err.Error())
		return
	}

	now = time.Now()
	expiry = now.Add(time.Duration(s.config.HTTP.CookieExpiry) * time.Second)

	cookie = http.Cookie{
		Name:     "session",
		Value:    cookieValue,
		SameSite: http.SameSiteLaxMode,
		HttpOnly: true,
		Secure:   false, // TODO
		Expires:  expiry,
		Path:     "/",
	} //exhaustruct:ignore

	http.SetCookie(writer, &cookie)

	_, err = s.database.Exec(request.Context(), "INSERT INTO sessions (user_id, session_id) VALUES ($1, $2)", userID, cookieValue)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error inserting session: "+err.Error())
		return
	}

	http.Redirect(writer, request, "/", http.StatusSeeOther)
}

// randomUrlsafeStr generates a random string of the given entropic size
// using the URL-safe base64 encoding. The actual size of the string returned
// will be 4*sz.
func randomUrlsafeStr(sz int) (string, error) {
	r := make([]byte, 3*sz)
	_, err := rand.Read(r)
	if err != nil {
		return "", fmt.Errorf("error generating random string: %w", err)
	}
	return base64.RawURLEncoding.EncodeToString(r), nil
}
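
// For instance, the session cookie above uses randomUrlsafeStr(16), which
// reads 48 random bytes and encodes them into a 64-character URL-safe string:
//
//	token, err := randomUrlsafeStr(16) // len(token) == 64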
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/filemode"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
	"github.com/go-git/go-git/v5/plumbing/object"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/internal/oldgit"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/oldgit"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// usableFilePatch is a [diff.FilePatch] that is structured in a way more
// friendly for use in HTML templates.
type usableFilePatch struct {
	From   diff.File
	To     diff.File
	Chunks []usableChunk
}

// usableChunk is a [diff.Chunk] that is structured in a way more friendly for
// use in HTML templates.
type usableChunk struct {
	Operation diff.Operation
	Content   string
}

func (s *Server) httpHandleRepoCommit(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var repo *git.Repository
	var commitIDStrSpec, commitIDStrSpecNoSuffix string
	var commitID plumbing.Hash
	var parentCommitHash plumbing.Hash
	var commitObj *object.Commit
	var commitIDStr string
	var err error
	var patch *object.Patch

	repo, commitIDStrSpec = params["repo"].(*git.Repository), params["commit_id"].(string)

	commitIDStrSpecNoSuffix = strings.TrimSuffix(commitIDStrSpec, ".patch")
	commitID = plumbing.NewHash(commitIDStrSpecNoSuffix)
	if commitObj, err = repo.CommitObject(commitID); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting commit object: "+err.Error())
		return
	}
	if commitIDStrSpecNoSuffix != commitIDStrSpec {
		var patchStr string
		if patchStr, err = oldgit.FmtCommitPatch(commitObj); err != nil {
			web.ErrorPage500(s.templates, writer, params, "Error formatting patch: "+err.Error())
			return
		}
		fmt.Fprintln(writer, patchStr)
		return
	}
	commitIDStr = commitObj.Hash.String()

	if commitIDStr != commitIDStrSpec {
		http.Redirect(writer, request, commitIDStr, http.StatusSeeOther)
		return
	}

	params["commit_object"] = commitObj
	params["commit_id"] = commitIDStr

	parentCommitHash, patch, err = oldgit.CommitToPatch(commitObj)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting patch from commit: "+err.Error())
		return
	}
	params["parent_commit_hash"] = parentCommitHash.String()
	params["patch"] = patch

	params["file_patches"] = makeUsableFilePatches(patch)

	s.renderTemplate(writer, "repo_commit", params)
}

type fakeDiffFile struct {
	hash plumbing.Hash
	mode filemode.FileMode
	path string
}

func (f fakeDiffFile) Hash() plumbing.Hash {
	return f.hash
}

func (f fakeDiffFile) Mode() filemode.FileMode {
	return f.mode
}

func (f fakeDiffFile) Path() string {
	return f.path
}

var nullFakeDiffFile = fakeDiffFile{ //nolint:gochecknoglobals
	hash: plumbing.NewHash("0000000000000000000000000000000000000000"),
	mode: misc.FirstOrPanic(filemode.New("100644")),
	path: "",
}

func makeUsableFilePatches(patch diff.Patch) (usableFilePatches []usableFilePatch) {
	// TODO: Remove unnecessary context
	// TODO: Prepend "+"/"-"/" " instead of solely distinguishing based on color

	for _, filePatch := range patch.FilePatches() {
		var fromFile, toFile diff.File
		var ufp usableFilePatch
		chunks := []usableChunk{}

		fromFile, toFile = filePatch.Files()
		if fromFile == nil {
			fromFile = nullFakeDiffFile
		}
		if toFile == nil {
			toFile = nullFakeDiffFile
		}
		for _, chunk := range filePatch.Chunks() {
			var content string

			content = chunk.Content()
			if len(content) > 0 && content[0] == '\n' {
				content = "\n" + content
			} // Hack: browsers drop a newline immediately following an opening <pre> tag, so prepend another to preserve it
			chunks = append(chunks, usableChunk{
				Operation: chunk.Type(),
				Content:   content,
			})
		}
		ufp = usableFilePatch{
			Chunks: chunks,
			From:   fromFile,
			To:     toFile,
		}
		usableFilePatches = append(usableFilePatches, ufp)
	}
	return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"

	"github.com/jackc/pgx/v5"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// idTitleStatus describes the properties of a merge request that need to be
// present in MR listings.
type idTitleStatus struct {
	ID     int
	Title  string
	Status string
}

// httpHandleRepoContribIndex provides an index to merge requests of a repo.
func (s *Server) httpHandleRepoContribIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var rows pgx.Rows
	var result []idTitleStatus
	var err error

	if rows, err = s.database.Query(request.Context(),
		"SELECT repo_local_id, COALESCE(title, 'Untitled'), status FROM merge_requests WHERE repo_id = $1",
		params["repo_id"],
	); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error querying merge requests: "+err.Error())
		return
	}
	defer rows.Close()

	for rows.Next() {
		var mrID int
		var mrTitle, mrStatus string
		if err = rows.Scan(&mrID, &mrTitle, &mrStatus); err != nil {
			web.ErrorPage500(s.templates, writer, params, "Error scanning merge request: "+err.Error())
			return
		}
		result = append(result, idTitleStatus{mrID, mrTitle, mrStatus})
	}
	if err = rows.Err(); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error ranging over merge requests: "+err.Error())
		return
	}
	params["merge_requests"] = result

	s.renderTemplate(writer, "repo_contrib_index", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"
	"strconv"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleRepoContribOne provides an interface to each merge request of a
// repo.
func (s *Server) httpHandleRepoContribOne(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	var mrIDStr string
	var mrIDInt int
	var err error
	var title, status, srcRefStr, dstBranchStr string
	var repo *git.Repository
	var srcRefHash plumbing.Hash
	var dstBranchHash plumbing.Hash
	var srcCommit, dstCommit, mergeBaseCommit *object.Commit
	var mergeBases []*object.Commit

	mrIDStr = params["mr_id"].(string)
	mrIDInt64, err := strconv.ParseInt(mrIDStr, 10, strconv.IntSize)
	if err != nil {
		web.ErrorPage400(s.templates, writer, params, "Merge request ID not an integer")
		return
	}
	mrIDInt = int(mrIDInt64)

	if err = s.database.QueryRow(request.Context(),
		"SELECT COALESCE(title, ''), status, source_ref, COALESCE(destination_branch, '') FROM merge_requests WHERE repo_id = $1 AND repo_local_id = $2",
		params["repo_id"], mrIDInt,
	).Scan(&title, &status, &srcRefStr, &dstBranchStr); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error querying merge request: "+err.Error())
		return
	}

	repo = params["repo"].(*git.Repository)

	if srcRefHash, err = getRefHash(repo, "branch", srcRefStr); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting source ref hash: "+err.Error())
		return
	}
	if srcCommit, err = repo.CommitObject(srcRefHash); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting source commit: "+err.Error())
		return
	}
	params["source_commit"] = srcCommit

	if dstBranchStr == "" {
		dstBranchStr = "HEAD"
		dstBranchHash, err = getRefHash(repo, "", "")
	} else {
		dstBranchHash, err = getRefHash(repo, "branch", dstBranchStr)
	}
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting destination branch hash: "+err.Error())
		return
	}

	if dstCommit, err = repo.CommitObject(dstBranchHash); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting destination commit: "+err.Error())
		return
	}
	params["destination_commit"] = dstCommit

	if mergeBases, err = srcCommit.MergeBase(dstCommit); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting merge base: "+err.Error())
		return
	}

	if len(mergeBases) < 1 {
		web.ErrorPage500(s.templates, writer, params, "No merge base found for this merge request; these two branches do not share any common history")
		// TODO
		return
	}

	mergeBaseCommit = mergeBases[0]
	params["merge_base"] = mergeBaseCommit

	patch, err := mergeBaseCommit.Patch(srcCommit)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting patch: "+err.Error())
		return
	}
	params["file_patches"] = makeUsableFilePatches(patch)

	params["mr_title"], params["mr_status"], params["mr_source_ref"], params["mr_destination_branch"] = title, status, srcRefStr, dstBranchStr

	s.renderTemplate(writer, "repo_contrib_one", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"

	"go.lindenii.runxiyu.org/forge/internal/git2c"
	"go.lindenii.runxiyu.org/forge/internal/render"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
	"go.lindenii.runxiyu.org/forge/forged/internal/render"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleRepoIndex provides the front page of a repo using git2d.
func (s *Server) httpHandleRepoIndex(w http.ResponseWriter, req *http.Request, params map[string]any) {
	repoName := params["repo_name"].(string)
	groupPath := params["group_path"].([]string)

	_, repoPath, _, _, _, _, _ := s.getRepoInfo(req.Context(), groupPath, repoName, "") // TODO: Don't use getRepoInfo

	client, err := git2c.NewClient(s.config.Git.Socket)
	if err != nil {
		web.ErrorPage500(s.templates, w, params, err.Error())
		return
	}
	defer client.Close()

	commits, readme, err := client.CmdIndex(repoPath)
	if err != nil {
		web.ErrorPage500(s.templates, w, params, err.Error())
		return
	}

	params["commits"] = commits
	params["readme_filename"] = readme.Filename
	_, params["readme"] = render.Readme(readme.Content, readme.Filename)

	s.renderTemplate(w, "repo_index", params)

	// TODO: Caching
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"fmt"
	"io"
	"net/http"
	"os/exec"

	"github.com/jackc/pgx/v5/pgtype"
)

// httpHandleRepoInfo provides advertised refs of a repo for use in Git's Smart
// HTTP protocol.
//
// TODO: Reject access from web browsers.
func (s *Server) httpHandleRepoInfo(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
	groupPath := params["group_path"].([]string)
	repoName := params["repo_name"].(string)
	var repoPath string

	if err := s.database.QueryRow(request.Context(), `
	WITH RECURSIVE group_path_cte AS (
		-- Start: match the first name in the path where parent_group IS NULL
		SELECT
			id,
			parent_group,
			name,
			1 AS depth
		FROM groups
		WHERE name = ($1::text[])[1]
			AND parent_group IS NULL
	
		UNION ALL
	
		-- Recurse: join next segment of the path
		SELECT
			g.id,
			g.parent_group,
			g.name,
			group_path_cte.depth + 1
		FROM groups g
		JOIN group_path_cte ON g.parent_group = group_path_cte.id
		WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
			AND group_path_cte.depth + 1 <= cardinality($1::text[])
	)
	SELECT r.filesystem_path
	FROM group_path_cte c
	JOIN repos r ON r.group_id = c.id
	WHERE c.depth = cardinality($1::text[])
		AND r.name = $2
	`,
		pgtype.FlatArray[string](groupPath),
		repoName,
	).Scan(&repoPath); err != nil {
		return err
	}

	writer.Header().Set("Content-Type", "application/x-git-upload-pack-advertisement")
	writer.WriteHeader(http.StatusOK)

	cmd := exec.Command("git", "upload-pack", "--stateless-rpc", "--advertise-refs", repoPath)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	defer func() {
		_ = stdout.Close()
	}()
	cmd.Stderr = cmd.Stdout

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = packLine(writer, "# service=git-upload-pack\n"); err != nil {
		return err
	}

	if err = packFlush(writer); err != nil {
		return
	}

	if _, err = io.Copy(writer, stdout); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		return err
	}

	return nil
}

// Taken from https://github.com/icyphox/legit, MIT license.
func packLine(w io.Writer, s string) error {
	_, err := fmt.Fprintf(w, "%04x%s", len(s)+4, s)
	return err
}

// Taken from https://github.com/icyphox/legit, MIT license.
func packFlush(w io.Writer) error {
	_, err := fmt.Fprint(w, "0000")
	return err
}
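
// Illustrative sketch of the pkt-line framing used above: the four hex digits
// encode the total length of the line including the four-byte prefix itself,
// and "0000" is the flush packet that ends a section.
//
//	_ = packLine(w, "# service=git-upload-pack\n") // writes "001e# service=git-upload-pack\n"
//	_ = packFlush(w)                               // writes "0000"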
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleRepoLog provides a page with a complete Git log.
//
// TODO: This currently provides all commits in the branch. It should be
// paginated and cached instead.
func (s *Server) httpHandleRepoLog(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
	var repo *git.Repository
	var refHash plumbing.Hash
	var err error

	repo = params["repo"].(*git.Repository)

	if refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string)); err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting ref hash: "+err.Error())
		return
	}

	logOptions := git.LogOptions{From: refHash} //exhaustruct:ignore
	commitIter, err := repo.Log(&logOptions)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, "Error getting recent commits: "+err.Error())
		return
	}
	params["commits"], params["commits_err"] = commitIterSeqErr(commitIter)

	s.renderTemplate(writer, "repo_log", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"fmt"
	"html/template"
	"net/http"
	"strings"

	"go.lindenii.runxiyu.org/forge/internal/git2c"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleRepoRaw serves raw files, or directory listings that point to raw
// files.
func (s *Server) httpHandleRepoRaw(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	repoName := params["repo_name"].(string)
	groupPath := params["group_path"].([]string)
	rawPathSpec := params["rest"].(string)
	pathSpec := strings.TrimSuffix(rawPathSpec, "/")
	params["path_spec"] = pathSpec

	_, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "")

	client, err := git2c.NewClient(s.config.Git.Socket)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, err.Error())
		return
	}
	defer client.Close()

	files, content, err := client.CmdTreeRaw(repoPath, pathSpec)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, err.Error())
		return
	}

	switch {
	case files != nil:
		params["files"] = files
		params["readme_filename"] = "README.md"
		params["readme"] = template.HTML("<p>README rendering here is WIP again</p>") // TODO
		s.renderTemplate(writer, "repo_raw_dir", params)
	case content != "":
		if misc.RedirectNoDir(writer, request) {
			return
		}
		writer.Header().Set("Content-Type", "application/octet-stream")
		fmt.Fprint(writer, content)
	default:
		web.ErrorPage500(s.templates, writer, params, "Unknown error fetching repo raw data")
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"html/template"
	"net/http"
	"strings"

	"go.lindenii.runxiyu.org/forge/internal/git2c"
	"go.lindenii.runxiyu.org/forge/internal/render"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
	"go.lindenii.runxiyu.org/forge/forged/internal/render"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleRepoTree provides a friendly, syntax-highlighted view of
// individual files, and provides directory views that link to these files.
//
// TODO: Do not highlight files that are too large.
func (s *Server) httpHandleRepoTree(writer http.ResponseWriter, request *http.Request, params map[string]any) {
	repoName := params["repo_name"].(string)
	groupPath := params["group_path"].([]string)
	rawPathSpec := params["rest"].(string)
	pathSpec := strings.TrimSuffix(rawPathSpec, "/")
	params["path_spec"] = pathSpec

	_, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "")

	client, err := git2c.NewClient(s.config.Git.Socket)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, err.Error())
		return
	}
	defer client.Close()

	files, content, err := client.CmdTreeRaw(repoPath, pathSpec)
	if err != nil {
		web.ErrorPage500(s.templates, writer, params, err.Error())
		return
	}

	switch {
	case files != nil:
		params["files"] = files
		params["readme_filename"] = "README.md"
		params["readme"] = template.HTML("<p>README rendering here is WIP again</p>") // TODO
		s.renderTemplate(writer, "repo_tree_dir", params)
	case content != "":
		rendered := render.Highlight(pathSpec, content)
		params["file_contents"] = rendered
		s.renderTemplate(writer, "repo_tree_file", params)
	default:
		web.ErrorPage500(s.templates, writer, params, "Unknown object type, something is seriously wrong")
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"io"
	"net/http"
	"os"
	"os/exec"

	"github.com/jackc/pgx/v5/pgtype"
)

// httpHandleUploadPack handles incoming Git fetches, pulls, and clones over
// the Smart HTTP protocol.
func (s *Server) httpHandleUploadPack(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
	var groupPath []string
	var repoName string
	var repoPath string
	var stdout io.ReadCloser
	var stdin io.WriteCloser
	var cmd *exec.Cmd

	groupPath, repoName = params["group_path"].([]string), params["repo_name"].(string)

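	// Resolve the repository's filesystem path in a single query: the
	// recursive CTE walks the group hierarchy one path segment per level,
	// then the final SELECT matches the repo within the leaf group.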
	if err := s.database.QueryRow(request.Context(), `
	WITH RECURSIVE group_path_cte AS (
		-- Start: match the first name in the path where parent_group IS NULL
		SELECT
			id,
			parent_group,
			name,
			1 AS depth
		FROM groups
		WHERE name = ($1::text[])[1]
			AND parent_group IS NULL
	
		UNION ALL
	
		-- Recurse: join the next segment of the path
		SELECT
			g.id,
			g.parent_group,
			g.name,
			group_path_cte.depth + 1
		FROM groups g
		JOIN group_path_cte ON g.parent_group = group_path_cte.id
		WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
			AND group_path_cte.depth + 1 <= cardinality($1::text[])
	)
	SELECT r.filesystem_path
	FROM group_path_cte c
	JOIN repos r ON r.group_id = c.id
	WHERE c.depth = cardinality($1::text[])
		AND r.name = $2
	`,
		pgtype.FlatArray[string](groupPath),
		repoName,
	).Scan(&repoPath); err != nil {
		return err
	}

	writer.Header().Set("Content-Type", "application/x-git-upload-pack-result")
	writer.Header().Set("Connection", "Keep-Alive")
	writer.Header().Set("Transfer-Encoding", "chunked")
	writer.WriteHeader(http.StatusOK)

	cmd = exec.Command("git", "upload-pack", "--stateless-rpc", repoPath)
	cmd.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket)
	if stdout, err = cmd.StdoutPipe(); err != nil {
		return err
	}
	cmd.Stderr = cmd.Stdout
	defer func() {
		_ = stdout.Close()
	}()

	if stdin, err = cmd.StdinPipe(); err != nil {
		return err
	}
	defer func() {
		_ = stdin.Close()
	}()

	if err = cmd.Start(); err != nil {
		return err
	}

	if _, err = io.Copy(stdin, request.Body); err != nil {
		return err
	}

	if err = stdin.Close(); err != nil {
		return err
	}

	if _, err = io.Copy(writer, stdout); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		return err
	}

	return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/http"

	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// httpHandleUsers is a useless stub.
func (s *Server) httpHandleUsers(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
	web.ErrorPage501(s.templates, writer, params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"errors"
	"log/slog"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/jackc/pgx/v5"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/internal/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/web"
)

// ServeHTTP handles all incoming HTTP requests and routes them to the correct
// location.
//
// TODO: This function is way too large.
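//
// Rough routing scheme, as implemented below:
//
//	/                               -> index
//	/-/static/..., /-/source/...    -> static assets and the source tarball
//	/-/login, /-/users              -> login page and the users stub
//	/group/.../-/repos/name         -> repo index (plus the info and
//	                                   git-upload-pack endpoints)
//	/group/.../-/repos/name/...     -> tree, branches, raw, log, commit,
//	                                   contrib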
func (s *Server) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
	var remoteAddr string
	if s.config.HTTP.ReverseProxy {
		remoteAddrs, ok := request.Header["X-Forwarded-For"]
		if ok && len(remoteAddrs) == 1 {
			remoteAddr = remoteAddrs[0]
		} else {
			remoteAddr = request.RemoteAddr
		}
	} else {
		remoteAddr = request.RemoteAddr
	}
	slog.Info("incoming http", "addr", remoteAddr, "method", request.Method, "uri", request.RequestURI)

	var segments []string
	var err error
	var sepIndex int
	params := make(map[string]any)

	if segments, _, err = misc.ParseReqURI(request.RequestURI); err != nil {
		web.ErrorPage400(s.templates, writer, params, "Error parsing request URI: "+err.Error())
		return
	}
	dirMode := false
	if segments[len(segments)-1] == "" {
		dirMode = true
		segments = segments[:len(segments)-1]
	}

	params["url_segments"] = segments
	params["dir_mode"] = dirMode
	params["global"] = s.globalData
	var userID int // 0 for none
	userID, params["username"], err = s.getUserFromRequest(request)
	params["user_id"] = userID
	if err != nil && !errors.Is(err, http.ErrNoCookie) && !errors.Is(err, pgx.ErrNoRows) {
		web.ErrorPage500(s.templates, writer, params, "Error getting user info from request: "+err.Error())
		return
	}

	if userID == 0 {
		params["user_id_string"] = ""
	} else {
		params["user_id_string"] = strconv.Itoa(userID)
	}

	for _, v := range segments {
		if strings.Contains(v, ":") {
			web.ErrorPage400Colon(s.templates, writer, params)
			return
		}
	}

	if len(segments) == 0 {
		s.httpHandleIndex(writer, request, params)
		return
	}

	if segments[0] == "-" {
		if len(segments) < 2 {
			web.ErrorPage404(s.templates, writer, params)
			return
		} else if len(segments) == 2 && misc.RedirectDir(writer, request) {
			return
		}

		switch segments[1] {
		case "static":
			s.staticHandler.ServeHTTP(writer, request)
			return
		case "source":
			s.sourceHandler.ServeHTTP(writer, request)
			return
		}
	}

	if segments[0] == "-" {
		switch segments[1] {
		case "login":
			s.httpHandleLogin(writer, request, params)
			return
		case "users":
			s.httpHandleUsers(writer, request, params)
			return
		default:
			web.ErrorPage404(s.templates, writer, params)
			return
		}
	}

	sepIndex = -1
	for i, part := range segments {
		if part == "-" {
			sepIndex = i
			break
		}
	}

	params["separator_index"] = sepIndex

	var groupPath []string
	var moduleType string
	var moduleName string

	if sepIndex > 0 {
		groupPath = segments[:sepIndex]
	} else {
		groupPath = segments
	}
	params["group_path"] = groupPath

	switch {
	case sepIndex == -1:
		if misc.RedirectDir(writer, request) {
			return
		}
		s.httpHandleGroupIndex(writer, request, params)
	case len(segments) == sepIndex+1:
		web.ErrorPage404(s.templates, writer, params)
		return
	case len(segments) == sepIndex+2:
		web.ErrorPage404(s.templates, writer, params)
		return
	default:
		moduleType = segments[sepIndex+1]
		moduleName = segments[sepIndex+2]
		switch moduleType {
		case "repos":
			params["repo_name"] = moduleName

			if len(segments) > sepIndex+3 {
				switch segments[sepIndex+3] {
				case "info":
					if err = s.httpHandleRepoInfo(writer, request, params); err != nil {
						web.ErrorPage500(s.templates, writer, params, err.Error())
					}
					return
				case "git-upload-pack":
					if err = s.httpHandleUploadPack(writer, request, params); err != nil {
						web.ErrorPage500(s.templates, writer, params, err.Error())
					}
					return
				}
			}

			if params["ref_type"], params["ref_name"], err = misc.GetParamRefTypeName(request); err != nil {
				if errors.Is(err, misc.ErrNoRefSpec) {
					params["ref_type"] = ""
				} else {
					web.ErrorPage400(s.templates, writer, params, "Error querying ref type: "+err.Error())
					return
				}
			}

			if params["repo"], params["repo_description"], params["repo_id"], _, err = s.openRepo(request.Context(), groupPath, moduleName); err != nil {
				web.ErrorPage500(s.templates, writer, params, "Error opening repo: "+err.Error())
				return
			}

			repoURLRoot := "/"
			for _, part := range segments[:sepIndex+3] {
				repoURLRoot = repoURLRoot + url.PathEscape(part) + "/"
			}
			params["repo_url_root"] = repoURLRoot
			params["repo_patch_mailing_list"] = repoURLRoot[1:len(repoURLRoot)-1] + "@" + s.config.LMTP.Domain
			params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, moduleName)
			params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, moduleName)

			if len(segments) == sepIndex+3 {
				if misc.RedirectDir(writer, request) {
					return
				}
				s.httpHandleRepoIndex(writer, request, params)
				return
			}

			repoFeature := segments[sepIndex+3]
			switch repoFeature {
			case "tree":
				if misc.AnyContain(segments[sepIndex+4:], "/") {
					web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments")
					return
				}
				if dirMode {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
				} else {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/")
				}
				if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) {
					return
				}
				s.httpHandleRepoTree(writer, request, params)
			case "branches":
				if misc.RedirectDir(writer, request) {
					return
				}
				s.httpHandleRepoBranches(writer, request, params)
				return
			case "raw":
				if misc.AnyContain(segments[sepIndex+4:], "/") {
					web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments")
					return
				}
				if dirMode {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
				} else {
					params["rest"] = strings.Join(segments[sepIndex+4:], "/")
				}
				if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) {
					return
				}
				s.httpHandleRepoRaw(writer, request, params)
			case "log":
				if len(segments) > sepIndex+4 {
					web.ErrorPage400(s.templates, writer, params, "Too many parameters")
					return
				}
				if misc.RedirectDir(writer, request) {
					return
				}
				s.httpHandleRepoLog(writer, request, params)
			case "commit":
				if len(segments) != sepIndex+5 {
					web.ErrorPage400(s.templates, writer, params, "Incorrect number of parameters")
					return
				}
				if misc.RedirectNoDir(writer, request) {
					return
				}
				params["commit_id"] = segments[sepIndex+4]
				s.httpHandleRepoCommit(writer, request, params)
			case "contrib":
				if misc.RedirectDir(writer, request) {
					return
				}
				switch len(segments) {
				case sepIndex + 4:
					s.httpHandleRepoContribIndex(writer, request, params)
				case sepIndex + 5:
					params["mr_id"] = segments[sepIndex+4]
					s.httpHandleRepoContribOne(writer, request, params)
				default:
					web.ErrorPage400(s.templates, writer, params, "Too many parameters")
				}
			default:
				web.ErrorPage404(s.templates, writer, params)
				return
			}
		default:
			web.ErrorPage404(s.templates, writer, params)
			return
		}
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"log/slog"
	"net/http"
)

// renderTemplate abstracts out the annoyances of reporting template rendering
// errors.
func (s *Server) renderTemplate(w http.ResponseWriter, templateName string, params map[string]any) {
	if err := s.templates.ExecuteTemplate(w, templateName, params); err != nil {
		http.Error(w, "error rendering template: "+err.Error(), http.StatusInternalServerError)
		slog.Error("error rendering template", "error", err.Error())
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strings"
	"time"

	"github.com/bluekeyes/go-gitdiff/gitdiff"
	"github.com/go-git/go-git/v5"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

func (s *Server) lmtpHandlePatch(session *lmtpSession, groupPath []string, repoName string, mbox io.Reader) (err error) {
	var diffFiles []*gitdiff.File
	var preamble string
	if diffFiles, preamble, err = gitdiff.Parse(mbox); err != nil {
		return fmt.Errorf("failed to parse patch: %w", err)
	}

	var header *gitdiff.PatchHeader
	if header, err = gitdiff.ParsePatchHeader(preamble); err != nil {
		return fmt.Errorf("failed to parse patch headers: %w", err)
	}

	var repo *git.Repository
	var fsPath string
	repo, _, _, fsPath, err = s.openRepo(session.ctx, groupPath, repoName)
	if err != nil {
		return fmt.Errorf("failed to open repo: %w", err)
	}

	headRef, err := repo.Head()
	if err != nil {
		return fmt.Errorf("failed to get repo head hash: %w", err)
	}
	headCommit, err := repo.CommitObject(headRef.Hash())
	if err != nil {
		return fmt.Errorf("failed to get repo head commit: %w", err)
	}
	headTree, err := headCommit.Tree()
	if err != nil {
		return fmt.Errorf("failed to get repo head tree: %w", err)
	}

	headTreeHash := headTree.Hash.String()

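	// For each file in the patch, apply the diff to the corresponding blob at
	// HEAD and write the patched content back as a new blob, recording the
	// new (or deleted) blob hashes by path so a new tree can be built below.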
	blobUpdates := make(map[string][]byte)
	for _, diffFile := range diffFiles {
		sourceFile, err := headTree.File(diffFile.OldName)
		if err != nil {
			return fmt.Errorf("failed to get file at old name %#v: %w", diffFile.OldName, err)
		}
		sourceString, err := sourceFile.Contents()
		if err != nil {
			return fmt.Errorf("failed to get contents: %w", err)
		}

		sourceBuf := bytes.NewReader(misc.StringToBytes(sourceString))
		var patchedBuf bytes.Buffer
		if err := gitdiff.Apply(&patchedBuf, sourceBuf, diffFile); err != nil {
			return fmt.Errorf("failed to apply patch: %w", err)
		}

		var hashBuf bytes.Buffer

		// It's really difficult to do this via go-git so we're just
		// going to use upstream git for now.
		// TODO
		cmd := exec.CommandContext(session.ctx, "git", "hash-object", "-w", "-t", "blob", "--stdin")
		cmd.Env = append(os.Environ(), "GIT_DIR="+fsPath)
		cmd.Stdout = &hashBuf
		cmd.Stdin = &patchedBuf
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("failed to run git hash-object: %w", err)
		}

		newHashStr := strings.TrimSpace(hashBuf.String())
		newHash, err := hex.DecodeString(newHashStr)
		if err != nil {
			return fmt.Errorf("failed to decode hex string from git: %w", err)
		}

		blobUpdates[diffFile.NewName] = newHash
		if diffFile.NewName != diffFile.OldName {
			blobUpdates[diffFile.OldName] = nil // Mark for deletion.
		}
	}

	newTreeSha, err := buildTreeRecursive(session.ctx, fsPath, headTreeHash, blobUpdates)
	if err != nil {
		return fmt.Errorf("failed to recursively build a tree: %w", err)
	}

	commitMsg := header.Title
	if header.Body != "" {
		commitMsg += "\n\n" + header.Body
	}

	env := append(os.Environ(),
		"GIT_DIR="+fsPath,
		"GIT_AUTHOR_NAME="+header.Author.Name,
		"GIT_AUTHOR_EMAIL="+header.Author.Email,
		"GIT_AUTHOR_DATE="+header.AuthorDate.Format(time.RFC3339),
	)
	commitCmd := exec.CommandContext(session.ctx, "git", "commit-tree", newTreeSha, "-p", headCommit.Hash.String(), "-m", commitMsg)
	commitCmd.Env = env

	var commitOut bytes.Buffer
	commitCmd.Stdout = &commitOut
	if err := commitCmd.Run(); err != nil {
		return fmt.Errorf("failed to commit tree: %w", err)
	}
	newCommitSha := strings.TrimSpace(commitOut.String())

	newBranchName := rand.Text()

	refCmd := exec.CommandContext(session.ctx, "git", "update-ref", "refs/heads/contrib/"+newBranchName, newCommitSha) //#nosec G204
	refCmd.Env = append(os.Environ(), "GIT_DIR="+fsPath)
	if err := refCmd.Run(); err != nil {
		return fmt.Errorf("failed to update ref: %w", err)
	}

	return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
// SPDX-FileCopyrightText: Copyright (c) 2024 Robin Jarry <robin@jarry.cc>

package unsorted

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"net"
	"strings"
	"time"

	"github.com/emersion/go-message"
	"github.com/emersion/go-smtp"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

// lmtpHandler carries a reference to the Server so that sessions can reach
// its configuration and patch handling.
type lmtpHandler struct {
	s *Server
}

type lmtpSession struct {
	from   string
	to     []string
	ctx    context.Context
	cancel context.CancelFunc
	s      *Server
}

func (session *lmtpSession) Reset() {
	session.from = ""
	session.to = nil
}

func (session *lmtpSession) Logout() error {
	session.cancel()
	return nil
}

func (session *lmtpSession) AuthPlain(_, _ string) error {
	return nil
}

func (session *lmtpSession) Mail(from string, _ *smtp.MailOptions) error {
	session.from = from
	return nil
}

func (session *lmtpSession) Rcpt(to string, _ *smtp.RcptOptions) error {
	session.to = append(session.to, to)
	return nil
}

func (handler *lmtpHandler) NewSession(_ *smtp.Conn) (smtp.Session, error) {
	ctx, cancel := context.WithCancel(context.Background())
	session := &lmtpSession{
		ctx:    ctx,
		cancel: cancel,
		s:      handler.s,
	}
	return session, nil
}

func (s *Server) serveLMTP(listener net.Listener) error {
	smtpServer := smtp.NewServer(&lmtpHandler{s: s})
	smtpServer.LMTP = true
	smtpServer.Domain = s.config.LMTP.Domain
	smtpServer.Addr = s.config.LMTP.Socket
	smtpServer.WriteTimeout = time.Duration(s.config.LMTP.WriteTimeout) * time.Second
	smtpServer.ReadTimeout = time.Duration(s.config.LMTP.ReadTimeout) * time.Second
	smtpServer.EnableSMTPUTF8 = true
	return smtpServer.Serve(listener)
}

func (session *lmtpSession) Data(r io.Reader) error {
	var (
		email *message.Entity
		from  string
		to    []string
		err   error
		buf   bytes.Buffer
		data  []byte
		n     int64
	)

	n, err = io.CopyN(&buf, r, session.s.config.LMTP.MaxSize)
	switch {
	case n == session.s.config.LMTP.MaxSize:
		err = errors.New("Message too big.")
		// drain whatever is left in the pipe
		_, _ = io.Copy(io.Discard, r)
		goto end
	case errors.Is(err, io.EOF):
		// message was smaller than max size
		break
	case err != nil:
		goto end
	}

	data = buf.Bytes()

	email, err = message.Read(bytes.NewReader(data))
	// Tolerate unknown-charset errors; bail out on any other parse error.
	if err != nil && !message.IsUnknownCharset(err) {
		goto end
	}

	switch strings.ToLower(email.Header.Get("Auto-Submitted")) {
	case "auto-generated", "auto-replied":
		// Disregard automatic emails like OOO replies
		slog.Info("ignoring automatic message",
			"from", session.from,
			"to", strings.Join(session.to, ","),
			"message-id", email.Header.Get("Message-Id"),
			"subject", email.Header.Get("Subject"),
		)
		goto end
	}

	slog.Info("message received",
		"from", session.from,
		"to", strings.Join(session.to, ","),
		"message-id", email.Header.Get("Message-Id"),
		"subject", email.Header.Get("Subject"),
	)

	// Make local copies of the values beforehand to ensure the references
	// remain valid when the task is run.
	from = session.from
	to = session.to

	_ = from

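	// Each recipient address encodes a forge path in its local part; for
	// example, group/subgroup/-/repos/name@<domain> addresses that
	// repository's patch mailing list.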
	for _, to := range to {
		if !strings.HasSuffix(to, "@"+session.s.config.LMTP.Domain) {
			continue
		}
		localPart := to[:len(to)-len("@"+session.s.config.LMTP.Domain)]
		var segments []string
		segments, err = misc.PathToSegments(localPart)
		if err != nil {
			// TODO: Should the entire email fail or should we just
			// notify them out of band?
			err = fmt.Errorf("cannot parse path: %w", err)
			goto end
		}
		sepIndex := -1
		for i, part := range segments {
			if part == "-" {
				sepIndex = i
				break
			}
		}
		if segments[len(segments)-1] == "" {
			segments = segments[:len(segments)-1] // We don't care about dir or not.
		}
		if sepIndex == -1 || len(segments) <= sepIndex+2 {
			err = errors.New("illegal path")
			goto end
		}

		mbox := bytes.Buffer{}
		if _, err = fmt.Fprint(&mbox, "From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001\r\n"); err != nil {
			slog.Error("error handling patch... malloc???", "error", err)
			goto end
		}
		data = bytes.ReplaceAll(data, []byte("\r\n"), []byte("\n"))
		if _, err = mbox.Write(data); err != nil {
			slog.Error("error handling patch... malloc???", "error", err)
			goto end
		}
		// TODO: Is mbox's From escaping necessary here?

		groupPath := segments[:sepIndex]
		moduleType := segments[sepIndex+1]
		moduleName := segments[sepIndex+2]
		switch moduleType {
		case "repos":
			err = session.s.lmtpHandlePatch(session, groupPath, moduleName, &mbox)
			if err != nil {
				slog.Error("error handling patch", "error", err)
				goto end
			}
		default:
			err = errors.New("Emailing any endpoint other than repositories, is not supported yet.") // TODO
			goto end
		}
	}

end:
	session.to = nil
	session.from = ""
	switch err {
	case nil:
		return nil
	default:
		return &smtp.SMTPError{
			Code:         550,
			Message:      "Permanent failure: " + err.Error(),
			EnhancedCode: [3]int{5, 7, 1},
		}
	}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"net/url"
	"strings"

	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

// We don't use path.Join because it collapses multiple slashes into one.

// genSSHRemoteURL generates SSH remote URLs from a given group path and repo
// name.
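// For example, group path ["a", "b"] and repo "c" yield
// "<SSH root>/a/b/-/repos/c".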
func (s *Server) genSSHRemoteURL(groupPath []string, repoName string) string {
	return strings.TrimSuffix(s.config.SSH.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName)
}

// genHTTPRemoteURL generates HTTP remote URLs from a given group path and repo
// name.
func (s *Server) genHTTPRemoteURL(groupPath []string, repoName string) string {
	return strings.TrimSuffix(s.config.HTTP.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"html/template"
	"io/fs"

	"github.com/tdewolff/minify/v2"
	"github.com/tdewolff/minify/v2/html"
	"go.lindenii.runxiyu.org/forge/internal/embed"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/embed"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

// loadTemplates minifies and loads HTML templates.
func (s *Server) loadTemplates() (err error) {
	minifier := minify.New()
	minifierOptions := html.Minifier{
		TemplateDelims:      [2]string{"{{", "}}"},
		KeepDefaultAttrVals: true,
	} //exhaustruct:ignore
	minifier.Add("text/html", &minifierOptions)

	s.templates = template.New("templates").Funcs(template.FuncMap{
		"first_line":        misc.FirstLine,
		"path_escape":       misc.PathEscape,
		"query_escape":      misc.QueryEscape,
		"dereference_error": misc.DereferenceOrZero[error],
		"minus":             misc.Minus,
	})

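	// Walk the embedded templates directory, minifying each HTML template
	// before parsing it into the shared template set.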
	err = fs.WalkDir(embed.Resources, "templates", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() {
			content, err := fs.ReadFile(embed.Resources, path)
			if err != nil {
				return err
			}

			minified, err := minifier.Bytes("text/html", content)
			if err != nil {
				return err
			}

			_, err = s.templates.Parse(misc.BytesToString(minified))
			if err != nil {
				return err
			}
		}
		return nil
	})
	return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"errors"
	"html/template"
	"io/fs"
	"log"
	"log/slog"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"syscall"
	"time"

	"go.lindenii.runxiyu.org/forge/internal/database"
	"go.lindenii.runxiyu.org/forge/internal/embed"
	"go.lindenii.runxiyu.org/forge/internal/irc"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/database"
	"go.lindenii.runxiyu.org/forge/forged/internal/embed"
	"go.lindenii.runxiyu.org/forge/forged/internal/irc"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
	"go.lindenii.runxiyu.org/lindenii-common/cmap"
	goSSH "golang.org/x/crypto/ssh"
)

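// Server is the main forge daemon. It holds the parsed configuration, the
// database handle, the loaded templates, and state shared across the HTTP,
// SSH, LMTP, and hook listeners.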
type Server struct {
	config Config

	database database.Database

	sourceHandler http.Handler
	staticHandler http.Handler

	// globalData is passed as "global" when rendering HTML templates.
	globalData map[string]any

	serverPubkeyString string
	serverPubkeyFP     string
	serverPubkey       goSSH.PublicKey

	// packPasses contains hook cookies mapped to their packPass.
	packPasses cmap.Map[string, packPass]

	templates *template.Template

	ircBot *irc.Bot

	ready bool
}

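// NewServer constructs a Server from the configuration file at configPath,
// loading templates and deploying the embedded git2d and hookc binaries.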
func NewServer(configPath string) (*Server, error) {
	s := &Server{
		globalData: make(map[string]any),
	} //exhaustruct:ignore

	if err := s.loadConfig(configPath); err != nil {
		return s, err
	}

	s.sourceHandler = http.StripPrefix(
		"/-/source/",
		http.FileServer(http.FS(embed.Source)),
	)
	staticFS, err := fs.Sub(embed.Resources, "static")
	if err != nil {
		return s, err
	}
	s.staticHandler = http.StripPrefix("/-/static/", http.FileServer(http.FS(staticFS)))
	s.globalData = map[string]any{
		"server_public_key_string":      &s.serverPubkeyString,
		"server_public_key_fingerprint": &s.serverPubkeyFP,
		"forge_version":                 version,
		// Some other ones are populated after config parsing
	}

	misc.NoneOrPanic(s.loadTemplates())
	misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("git2d/git2d")), s.config.Git.DaemonPath))
	misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("hookc/hookc")), filepath.Join(s.config.Hooks.Execs, "pre-receive")))
	misc.NoneOrPanic(os.Chmod(filepath.Join(s.config.Hooks.Execs, "pre-receive"), 0o755))

	s.ready = true

	return s, nil
}

func (s *Server) Run() error {
	if !s.ready {
		return errors.New("not ready")
	}

	// Launch Git2D
	go func() {
		cmd := exec.Command(s.config.Git.DaemonPath, s.config.Git.Socket) //#nosec G204
		cmd.Stderr = log.Writer()
		cmd.Stdout = log.Writer()
		if err := cmd.Run(); err != nil {
			panic(err)
		}
	}()

	// UNIX socket listener for hooks
	{
		hooksListener, err := net.Listen("unix", s.config.Hooks.Socket)
		if errors.Is(err, syscall.EADDRINUSE) {
			slog.Warn("removing existing socket", "path", s.config.Hooks.Socket)
			if err = syscall.Unlink(s.config.Hooks.Socket); err != nil {
				slog.Error("removing existing socket", "path", s.config.Hooks.Socket, "error", err)
				os.Exit(1)
			}
			if hooksListener, err = net.Listen("unix", s.config.Hooks.Socket); err != nil {
				slog.Error("listening hooks", "error", err)
				os.Exit(1)
			}
		} else if err != nil {
			slog.Error("listening hooks", "error", err)
			os.Exit(1)
		}
		slog.Info("listening hooks on unix", "path", s.config.Hooks.Socket)
		go func() {
			if err = s.serveGitHooks(hooksListener); err != nil {
				slog.Error("serving hooks", "error", err)
				os.Exit(1)
			}
		}()
	}

	// UNIX socket listener for LMTP
	{
		lmtpListener, err := net.Listen("unix", s.config.LMTP.Socket)
		if errors.Is(err, syscall.EADDRINUSE) {
			slog.Warn("removing existing socket", "path", s.config.LMTP.Socket)
			if err = syscall.Unlink(s.config.LMTP.Socket); err != nil {
				slog.Error("removing existing socket", "path", s.config.LMTP.Socket, "error", err)
				os.Exit(1)
			}
			if lmtpListener, err = net.Listen("unix", s.config.LMTP.Socket); err != nil {
				slog.Error("listening LMTP", "error", err)
				os.Exit(1)
			}
		} else if err != nil {
			slog.Error("listening LMTP", "error", err)
			os.Exit(1)
		}
		slog.Info("listening LMTP on unix", "path", s.config.LMTP.Socket)
		go func() {
			if err = s.serveLMTP(lmtpListener); err != nil {
				slog.Error("serving LMTP", "error", err)
				os.Exit(1)
			}
		}()
	}

	// SSH listener
	{
		sshListener, err := net.Listen(s.config.SSH.Net, s.config.SSH.Addr)
		if errors.Is(err, syscall.EADDRINUSE) && s.config.SSH.Net == "unix" {
			slog.Warn("removing existing socket", "path", s.config.SSH.Addr)
			if err = syscall.Unlink(s.config.SSH.Addr); err != nil {
				slog.Error("removing existing socket", "path", s.config.SSH.Addr, "error", err)
				os.Exit(1)
			}
			if sshListener, err = net.Listen(s.config.SSH.Net, s.config.SSH.Addr); err != nil {
				slog.Error("listening SSH", "error", err)
				os.Exit(1)
			}
		} else if err != nil {
			slog.Error("listening SSH", "error", err)
			os.Exit(1)
		}
		slog.Info("listening SSH on", "net", s.config.SSH.Net, "addr", s.config.SSH.Addr)
		go func() {
			if err = s.serveSSH(sshListener); err != nil {
				slog.Error("serving SSH", "error", err)
				os.Exit(1)
			}
		}()
	}

	// HTTP listener
	{
		httpListener, err := net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr)
		if errors.Is(err, syscall.EADDRINUSE) && s.config.HTTP.Net == "unix" {
			slog.Warn("removing existing socket", "path", s.config.HTTP.Addr)
			if err = syscall.Unlink(s.config.HTTP.Addr); err != nil {
				slog.Error("removing existing socket", "path", s.config.HTTP.Addr, "error", err)
				os.Exit(1)
			}
			if httpListener, err = net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr); err != nil {
				slog.Error("listening HTTP", "error", err)
				os.Exit(1)
			}
		} else if err != nil {
			slog.Error("listening HTTP", "error", err)
			os.Exit(1)
		}
		server := http.Server{
			Handler:      s,
			ReadTimeout:  time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
			WriteTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
			IdleTimeout:  time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
		} //exhaustruct:ignore
		slog.Info("listening HTTP on", "net", s.config.HTTP.Net, "addr", s.config.HTTP.Addr)
		go func() {
			if err = server.Serve(httpListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
				slog.Error("serving HTTP", "error", err)
				os.Exit(1)
			}
		}()
	}

	// IRC bot
	s.ircBot = irc.NewBot(&s.config.IRC)
	go s.ircBot.ConnectLoop()

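	// All listeners and the IRC bot run in their own goroutines; block
	// forever.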
	select {}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"errors"
	"fmt"
	"os"
	"os/exec"

	gliderSSH "github.com/gliderlabs/ssh"
	"github.com/go-git/go-git/v5"
)

// packPass contains information known when handling incoming SSH connections
// that then needs to be used in hook socket connection handlers. See hookc(1).
type packPass struct {
	session      gliderSSH.Session
	repo         *git.Repository
	pubkey       string
	directAccess bool
	repoPath     string
	userID       int
	userType     string
	repoID       int
	groupPath    []string
	repoName     string
	contribReq   string
}

// sshHandleRecvPack handles attempts to push to repos.
func (s *Server) sshHandleRecvPack(session gliderSSH.Session, pubkey, repoIdentifier string) (err error) {
	groupPath, repoName, repoID, repoPath, directAccess, contribReq, userType, userID, err := s.getRepoInfo2(session.Context(), repoIdentifier, pubkey)
	if err != nil {
		return err
	}
	repo, err := git.PlainOpen(repoPath)
	if err != nil {
		return err
	}

	repoConf, err := repo.Config()
	if err != nil {
		return err
	}

	repoConfCore := repoConf.Raw.Section("core")
	if repoConfCore == nil {
		return errors.New("repository has no core section in config")
	}

	hooksPath := repoConfCore.OptionAll("hooksPath")
	if len(hooksPath) != 1 || hooksPath[0] != s.config.Hooks.Execs {
		return errors.New("repository has hooksPath set to an unexpected value")
	}

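	// Pushers without direct access may still be allowed through, depending
	// on the repository's contrib_requirements setting.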
	if !directAccess {
		switch contribReq {
		case "closed":
			if !directAccess {
				return errors.New("you need direct access to push to this repo")
			}
		case "registered_user":
			if userType != "registered" {
				return errors.New("you need to be a registered user to push to this repo")
			}
		case "ssh_pubkey":
			fallthrough
		case "federated":
			if pubkey == "" {
				return errors.New("you need to have an SSH public key to push to this repo")
			}
			if userType == "" {
				userID, err = s.addUserSSH(session.Context(), pubkey)
				if err != nil {
					return err
				}
				fmt.Fprintln(session.Stderr(), "you are now registered as user ID", userID)
				userType = "pubkey_only"
			}

		case "public":
		default:
			panic("unknown contrib_requirements value " + contribReq)
		}
	}

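	// Generate a random cookie and stash this connection's state so that the
	// pre-receive hook (via hookc and the hooks socket) can look it up.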
	cookie, err := randomUrlsafeStr(16)
	if err != nil {
		fmt.Fprintln(session.Stderr(), "Error while generating cookie:", err)
	}

	s.packPasses.Store(cookie, packPass{
		session:      session,
		pubkey:       pubkey,
		directAccess: directAccess,
		repoPath:     repoPath,
		userID:       userID,
		repoID:       repoID,
		groupPath:    groupPath,
		repoName:     repoName,
		repo:         repo,
		contribReq:   contribReq,
		userType:     userType,
	})
	defer s.packPasses.Delete(cookie)
	// The Delete won't execute until proc.Wait returns, unless something goes
	// horribly wrong, such as a panic.

	proc := exec.CommandContext(session.Context(), "git-receive-pack", repoPath)
	proc.Env = append(os.Environ(),
		"LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket,
		"LINDENII_FORGE_HOOKS_COOKIE="+cookie,
	)
	proc.Stdin = session
	proc.Stdout = session
	proc.Stderr = session.Stderr()

	if err = proc.Start(); err != nil {
		fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
		return err
	}

	err = proc.Wait()
	if err != nil {
		fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
	}

	return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"fmt"
	"os"
	"os/exec"

	glider_ssh "github.com/gliderlabs/ssh"
)

// sshHandleUploadPack handles clones/fetches. It just uses git-upload-pack
// and has no ACL checks.
func (s *Server) sshHandleUploadPack(session glider_ssh.Session, pubkey, repoIdentifier string) (err error) {
	var repoPath string
	if _, _, _, repoPath, _, _, _, _, err = s.getRepoInfo2(session.Context(), repoIdentifier, pubkey); err != nil {
		return err
	}

	proc := exec.CommandContext(session.Context(), "git-upload-pack", repoPath)
	proc.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket)
	proc.Stdin = session
	proc.Stdout = session
	proc.Stderr = session.Stderr()

	if err = proc.Start(); err != nil {
		fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
		return err
	}

	err = proc.Wait()
	if err != nil {
		fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
	}

	return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"fmt"
	"log/slog"
	"net"
	"os"
	"strings"

	gliderSSH "github.com/gliderlabs/ssh"
	"go.lindenii.runxiyu.org/forge/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
	goSSH "golang.org/x/crypto/ssh"
)

// serveSSH serves SSH on a [net.Listener]. The listener should generally be a
// TCP listener, although AF_UNIX SOCK_STREAM listeners may be appropriate in
// rare cases.
func (s *Server) serveSSH(listener net.Listener) error {
	var hostKeyBytes []byte
	var hostKey goSSH.Signer
	var err error
	var server *gliderSSH.Server

	if hostKeyBytes, err = os.ReadFile(s.config.SSH.Key); err != nil {
		return err
	}

	if hostKey, err = goSSH.ParsePrivateKey(hostKeyBytes); err != nil {
		return err
	}

	s.serverPubkey = hostKey.PublicKey()
	s.serverPubkeyString = misc.BytesToString(goSSH.MarshalAuthorizedKey(s.serverPubkey))
	s.serverPubkeyFP = goSSH.FingerprintSHA256(s.serverPubkey)

	server = &gliderSSH.Server{
		Handler: func(session gliderSSH.Session) {
			clientPubkey := session.PublicKey()
			var clientPubkeyStr string
			if clientPubkey != nil {
				clientPubkeyStr = strings.TrimSuffix(misc.BytesToString(goSSH.MarshalAuthorizedKey(clientPubkey)), "\n")
			}

			slog.Info("incoming ssh", "addr", session.RemoteAddr().String(), "key", clientPubkeyStr, "command", session.RawCommand())
			fmt.Fprintln(session.Stderr(), ansiec.Blue+"Lindenii Forge "+version+", source at "+strings.TrimSuffix(s.config.HTTP.Root, "/")+"/-/source/"+ansiec.Reset+"\r")

			cmd := session.Command()

			if len(cmd) < 2 {
				fmt.Fprintln(session.Stderr(), "Insufficient arguments\r")
				return
			}

			switch cmd[0] {
			case "git-upload-pack":
				if len(cmd) > 2 {
					fmt.Fprintln(session.Stderr(), "Too many arguments\r")
					return
				}
				err = s.sshHandleUploadPack(session, clientPubkeyStr, cmd[1])
			case "git-receive-pack":
				if len(cmd) > 2 {
					fmt.Fprintln(session.Stderr(), "Too many arguments\r")
					return
				}
				err = s.sshHandleRecvPack(session, clientPubkeyStr, cmd[1])
			default:
				fmt.Fprintln(session.Stderr(), "Unsupported command: "+cmd[0]+"\r")
				return
			}
			if err != nil {
				fmt.Fprintln(session.Stderr(), err.Error())
				return
			}
		},
		PublicKeyHandler:           func(_ gliderSSH.Context, _ gliderSSH.PublicKey) bool { return true },
		KeyboardInteractiveHandler: func(_ gliderSSH.Context, _ goSSH.KeyboardInteractiveChallenge) bool { return true },
		// It is intentional that we do not check any credentials and accept all connections.
		// This allows all users to connect and clone repositories. However, the public key
		// is passed to handlers, so e.g. the push handler could check the key and reject the
		// push if it needs to.
	} //exhaustruct:ignore

	server.AddHostKey(hostKey)

	if err = server.Serve(listener); err != nil {
		slog.Error("error serving SSH", "error", err.Error())
		os.Exit(1)
	}

	return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/url"

	"go.lindenii.runxiyu.org/forge/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/internal/misc"
	"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
	"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)

var errIllegalSSHRepoPath = errors.New("illegal SSH repo path")

// getRepoInfo2 resolves an SSH repo path and fetches repository information,
// much like getRepoInfo. It should be deprecated, with its logic implemented
// in individual handlers instead.
func (s *Server) getRepoInfo2(ctx context.Context, sshPath, sshPubkey string) (groupPath []string, repoName string, repoID int, repoPath string, directAccess bool, contribReq, userType string, userID int, err error) {
	var segments []string
	var sepIndex int
	var moduleType, moduleName string

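	// The SSH repo path looks like group/.../-/repos/name; split it into
	// segments and locate the "-" separator below.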
	segments, err = misc.PathToSegments(sshPath)
	if err != nil {
		return
	}

	for i, segment := range segments {
		var err error
		segments[i], err = url.PathUnescape(segment)
		if err != nil {
			return []string{}, "", 0, "", false, "", "", 0, err
		}
	}

	if segments[0] == "-" {
		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
	}

	sepIndex = -1
	for i, part := range segments {
		if part == "-" {
			sepIndex = i
			break
		}
	}
	if segments[len(segments)-1] == "" {
		segments = segments[:len(segments)-1]
	}

	switch {
	case sepIndex == -1:
		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
	case len(segments) <= sepIndex+2:
		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
	}

	groupPath = segments[:sepIndex]
	moduleType = segments[sepIndex+1]
	moduleName = segments[sepIndex+2]
	repoName = moduleName
	switch moduleType {
	case "repos":
		_1, _2, _3, _4, _5, _6, _7 := s.getRepoInfo(ctx, groupPath, moduleName, sshPubkey)
		return groupPath, repoName, _1, _2, _3, _4, _5, _6, _7
	default:
		return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
	}
}

// writeRedError is a helper that works like Fprintf but wraps the entire
// output in red ANSI escape sequences. It's useful for producing error
// messages on SSH connections.
func writeRedError(w io.Writer, format string, args ...any) {
	fmt.Fprintln(w, ansiec.Red+fmt.Sprintf(format, args...)+ansiec.Reset)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package unsorted is where unsorted Go files from the old structure are kept.
package unsorted
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// addUserSSH adds a new user solely based on their SSH public key.
//
// TODO: Audit all users of this function.
func (s *Server) addUserSSH(ctx context.Context, pubkey string) (userID int, err error) {
	var txn pgx.Tx

	if txn, err = s.database.Begin(ctx); err != nil {
		return
	}
	defer func() {
		_ = txn.Rollback(ctx)
	}()

	if err = txn.QueryRow(ctx, `INSERT INTO users (type) VALUES ('pubkey_only') RETURNING id`).Scan(&userID); err != nil {
		return
	}

	if _, err = txn.Exec(ctx, `INSERT INTO ssh_public_keys (key_string, user_id) VALUES ($1, $2)`, pubkey, userID); err != nil {
		return
	}

	err = txn.Commit(ctx)
	return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package unsorted

var version = "unknown"
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package web

import (
	"html/template"
	"net/http"
)

func ErrorPage404(templates *template.Template, w http.ResponseWriter, params map[string]any) {
	w.WriteHeader(http.StatusNotFound)
	_ = templates.ExecuteTemplate(w, "404", params)
}

func ErrorPage400(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
	w.WriteHeader(http.StatusBadRequest)
	params["complete_error_msg"] = msg
	_ = templates.ExecuteTemplate(w, "400", params)
}

func ErrorPage400Colon(templates *template.Template, w http.ResponseWriter, params map[string]any) {
	w.WriteHeader(http.StatusBadRequest)
	_ = templates.ExecuteTemplate(w, "400_colon", params)
}

func ErrorPage403(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
	w.WriteHeader(http.StatusForbidden)
	params["complete_error_msg"] = msg
	_ = templates.ExecuteTemplate(w, "403", params)
}

func ErrorPage451(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
	w.WriteHeader(http.StatusUnavailableForLegalReasons)
	params["complete_error_msg"] = msg
	_ = templates.ExecuteTemplate(w, "451", params)
}

func ErrorPage500(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
	w.WriteHeader(http.StatusInternalServerError)
	params["complete_error_msg"] = msg
	_ = templates.ExecuteTemplate(w, "500", params)
}

func ErrorPage501(templates *template.Template, w http.ResponseWriter, params map[string]any) {
	w.WriteHeader(http.StatusNotImplemented)
	_ = templates.ExecuteTemplate(w, "501", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package web provides web-facing components of the forge.
package web