ssh/recv, hooks: Create MRs on push, reject pushes to others' MRs
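For context, Git's pre-receive hook receives one line per updated ref on standard input, in the form "<old-oid> SP <new-oid> SP <ref-name>", where an all-zero old OID marks a newly created ref. The hook daemon below relies on exactly this format to tell new contrib branches apart from updates to existing ones. A push creating one contrib branch and updating another might feed the hook something like the following (hypothetical object IDs):

0000000000000000000000000000000000000000 a94a8fe5ccb19ba61c4c0873d391e987982fbbd3 refs/heads/contrib/new-feature
a94a8fe5ccb19ba61c4c0873d391e987982fbbd3 1b6453892473a467d07372d45eb05abc2031647a refs/heads/contrib/existing-feature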
package main

import (
"context"
)

// get_path_perm_by_group_repo_key returns the repo ID, filesystem path,
// direct access permission, contribution requirements, user type, and user
// ID for a given repo and a provided SSH public key.
func get_path_perm_by_group_repo_key(ctx context.Context, group_name, repo_name, ssh_pubkey string) (repo_id int, filesystem_path string, access bool, contrib_requirements string, user_type string, user_id int, err error) {
err = database.QueryRow(ctx, `SELECT
r.id,
r.filesystem_path,
CASE WHEN ugr.user_id IS NOT NULL THEN TRUE ELSE FALSE END AS has_role_in_group,
r.contrib_requirements,
COALESCE(u.type, ''),
COALESCE(u.id, 0)
FROM groups g
JOIN repos r ON r.group_id = g.id
LEFT JOIN ssh_public_keys s ON s.key_string = $3
LEFT JOIN users u ON u.id = s.user_id
LEFT JOIN user_group_roles ugr ON ugr.group_id = g.id AND ugr.user_id = u.id
WHERE g.name = $1 AND r.name = $2;`,
group_name, repo_name, ssh_pubkey,
).Scan(&repo_id, &filesystem_path, &access, &contrib_requirements, &user_type, &user_id)
return
}
package main

import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"syscall"

"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/jackc/pgx/v5"
)
var (
err_get_fd = errors.New("unable to get file descriptor")
err_get_ucred = errors.New("failed getsockopt")
)
// hooks_handle_connection handles a connection from git_hooks_client via the
// unix socket.
func hooks_handle_connection(conn net.Conn) {
defer conn.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// There aren't reasonable cases where someone would run this as
// another user.
ucred, err := get_ucred(conn)
if err != nil {
if _, err := conn.Write([]byte{1}); err != nil {
return
}
fmt.Fprintln(conn, "Unable to get peer credentials:", err.Error())
return
}
if ucred.Uid != uint32(os.Getuid()) {
if _, err := conn.Write([]byte{1}); err != nil {
return
}
fmt.Fprintln(conn, "UID mismatch")
return
}
cookie := make([]byte, 64)
_, err = conn.Read(cookie)
if err != nil {
if _, err := conn.Write([]byte{1}); err != nil {
return
}
fmt.Fprintln(conn, "Failed to read cookie:", err.Error())
return
}
pack_to_hook, ok := pack_to_hook_by_cookie.Load(string(cookie))
if !ok {
if _, err := conn.Write([]byte{1}); err != nil {
return
}
fmt.Fprintln(conn, "Invalid handler cookie")
return
}
ssh_stderr := pack_to_hook.session.Stderr()
hook_return_value := func() byte {
var argc64 uint64
err = binary.Read(conn, binary.NativeEndian, &argc64)
if err != nil {
fmt.Fprintln(ssh_stderr, "Failed to read argc:", err.Error())
return 1
}
var args []string
for i := uint64(0); i < argc64; i++ {
var arg bytes.Buffer
for {
b := make([]byte, 1)
n, err := conn.Read(b)
if err != nil || n != 1 {
fmt.Fprintln(ssh_stderr, "Failed to read arg:", err.Error())
return 1
}
if b[0] == 0 {
break
}
arg.WriteByte(b[0])
}
args = append(args, arg.String())
}
var stdin bytes.Buffer
_, err = io.Copy(&stdin, conn)
if err != nil {
fmt.Fprintln(ssh_stderr, "Failed to read into the stdin buffer:", err.Error())
}
switch filepath.Base(args[0]) {
case "pre-receive":
if pack_to_hook.direct_access {
return 0
} else {
all_ok := true
for {
line, err := stdin.ReadString('\n')
if errors.Is(err, io.EOF) {
break
}
line = line[:len(line)-1]
old_oid, rest, found := strings.Cut(line, " ")
if !found {
fmt.Fprintln(ssh_stderr, "Invalid pre-receive line:", line)
return 1
}
new_oid, ref_name, found := strings.Cut(rest, " ")
if !found {
fmt.Fprintln(ssh_stderr, "Invalid pre-receive line:", line)
return 1
}
if strings.HasPrefix(ref_name, "refs/heads/contrib/") {
if all_zero_num_string(old_oid) { // New branch
fmt.Fprintln(ssh_stderr, "Acceptable push to new contrib branch: "+ref_name)
// Create a merge request. If that fails, reject
// this entire push immediately.
_, err = database.Exec(ctx,
"INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open')",
pack_to_hook.repo_id, pack_to_hook.user_id, strings.TrimPrefix(ref_name, "refs/heads/contrib/"),
)
if err != nil {
fmt.Fprintln(ssh_stderr, "Error creating merge request:", err.Error())
return 1
}
} else { // Existing contrib branch
// Check that the current user is authorized to
// push to this contrib branch.
var existing_merge_request_user_id int
err = database.QueryRow(ctx,
"SELECT creator FROM merge_requests WHERE source_ref = $1 AND repo_id = $2",
strings.TrimPrefix(ref_name, "refs/heads/contrib/"), pack_to_hook.repo_id,
).Scan(&existing_merge_request_user_id)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
fmt.Fprintln(ssh_stderr, "No existing merge request for existing contrib branch:", err.Error())
} else {
fmt.Fprintln(ssh_stderr, "Error querying for existing merge request:", err.Error())
}
return 1
}
if existing_merge_request_user_id != pack_to_hook.user_id {
all_ok = false
fmt.Fprintln(ssh_stderr, "Rejecting push to existing contrib branch owned by another user:", ref_name)
continue
}
repo, err := git.PlainOpen(pack_to_hook.repo_path)
if err != nil {
fmt.Fprintln(ssh_stderr, "Daemon failed to open repo:", err.Error())
return 1
}
old_hash := plumbing.NewHash(old_oid)
old_commit, err := repo.CommitObject(old_hash)
if err != nil {
fmt.Fprintln(ssh_stderr, "Daemon failed to get old commit:", err.Error())
return 1
}
// Potential BUG: I'm not sure whether new_commit is guaranteed to be
// resolvable here, since the pushed objects haven't been merged into
// the main repo's object store yet. But it seems to work, and I don't
// think there's any reason for this to only work intermittently.
new_hash := plumbing.NewHash(new_oid)
new_commit, err := repo.CommitObject(new_hash)
if err != nil {
fmt.Fprintln(ssh_stderr, "Daemon failed to get new commit:", err.Error())
return 1
}
is_ancestor, err := old_commit.IsAncestor(new_commit)
if err != nil {
fmt.Fprintln(ssh_stderr, "Daemon failed to check if old commit is ancestor:", err.Error())
return 1
}
if !is_ancestor {
// TODO: Create MR snapshot ref instead
all_ok = false
fmt.Fprintln(ssh_stderr, "Rejecting force push to contrib branch: "+ref_name)
continue
}
fmt.Fprintln(ssh_stderr, "Acceptable push to existing contrib branch: "+ref_name)
}
} else { // Non-contrib branch
all_ok = false
fmt.Fprintln(ssh_stderr, "Rejecting push to non-contrib branch: "+ref_name)
}
}
if all_ok {
return 0
} else {
return 1
}
}
default:
fmt.Fprintln(ssh_stderr, "Invalid hook:", args[0])
return 1
}
}()
_, _ = conn.Write([]byte{hook_return_value})
}
func serve_git_hooks(listener net.Listener) error {
for {
conn, err := listener.Accept()
if err != nil {
return err
}
go hooks_handle_connection(conn)
}
}
func get_ucred(conn net.Conn) (*syscall.Ucred, error) {
unix_conn := conn.(*net.UnixConn)
fd, err := unix_conn.File()
if err != nil {
return nil, err_get_fd
}
defer fd.Close()
ucred, err := syscall.GetsockoptUcred(int(fd.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED)
if err != nil {
return nil, err_get_ucred
}
return ucred, nil
}
func all_zero_num_string(s string) bool {
for _, r := range s {
if r != '0' {
return false
}
}
return true
}
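For reference, the byte-level protocol the daemon expects from git_hooks_client can be read directly off hooks_handle_connection above: a cookie (the daemon reads up to 64 bytes), argc as a native-endian uint64, each argument NUL-terminated, the hook's stdin until EOF, and finally a single status byte sent back. The actual git_hooks_client is not part of this diff; what follows is only a rough, hypothetical sketch of that sequence inferred from the daemon's reads, not the real implementation.

package main

import (
	"encoding/binary"
	"io"
	"net"
	"os"
)

// Hypothetical client for the hooks socket, mirroring the daemon's read
// sequence in hooks_handle_connection. Not the real git_hooks_client.
func main() {
	conn, err := net.Dial("unix", os.Getenv("LINDENII_FORGE_HOOKS_SOCKET_PATH"))
	if err != nil {
		os.Exit(1)
	}
	defer conn.Close()

	// Cookie identifying the receive-pack session (the daemon reads up to 64 bytes).
	if _, err := conn.Write([]byte(os.Getenv("LINDENII_FORGE_HOOKS_COOKIE"))); err != nil {
		os.Exit(1)
	}

	// argc as a native-endian uint64, then each argument NUL-terminated.
	if err := binary.Write(conn, binary.NativeEndian, uint64(len(os.Args))); err != nil {
		os.Exit(1)
	}
	for _, arg := range os.Args {
		if _, err := conn.Write(append([]byte(arg), 0)); err != nil {
			os.Exit(1)
		}
	}

	// Forward the hook's stdin, then half-close the write side so the
	// daemon's io.Copy sees EOF and replies with a single status byte.
	if _, err := io.Copy(conn, os.Stdin); err != nil {
		os.Exit(1)
	}
	if uc, ok := conn.(*net.UnixConn); ok {
		uc.CloseWrite()
	}

	status := make([]byte, 1)
	if _, err := io.ReadFull(conn, status); err != nil {
		os.Exit(1)
	}
	os.Exit(int(status[0]))
}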
CREATE TABLE groups (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
description TEXT
);
CREATE TABLE repos (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE RESTRICT, -- Arguably this should be CASCADE, but deleting the Git repos on disk would also need to be handled
contrib_requirements TEXT NOT NULL CHECK (contrib_requirements IN ('closed', 'registered_user', 'ssh_pubkey', 'public')),
name TEXT NOT NULL,
UNIQUE(group_id, name),
description TEXT,
filesystem_path TEXT
);
CREATE TABLE ticket_trackers (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE RESTRICT,
name TEXT NOT NULL,
UNIQUE(group_id, name),
description TEXT
);
CREATE TABLE tickets (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
tracker_id INTEGER NOT NULL REFERENCES ticket_trackers(id) ON DELETE CASCADE,
title TEXT NOT NULL,
description TEXT
);
CREATE TABLE mailing_lists (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE RESTRICT,
name TEXT NOT NULL,
UNIQUE(group_id, name),
description TEXT
);
CREATE TABLE mailing_list_emails (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
list_id INTEGER NOT NULL REFERENCES mailing_lists(id) ON DELETE CASCADE,
title TEXT NOT NULL,
sender TEXT NOT NULL,
date TIMESTAMP NOT NULL,
content BYTEA NOT NULL
);
CREATE TABLE users (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
username TEXT UNIQUE,
type TEXT NOT NULL CHECK (type IN ('pubkey_only', 'registered')),
password TEXT
);
CREATE TABLE ssh_public_keys (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
key_string TEXT NOT NULL,
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
CONSTRAINT unique_key_string EXCLUDE USING HASH (key_string WITH =)
);
CREATE TABLE sessions (
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
session_id TEXT PRIMARY KEY NOT NULL,
UNIQUE(user_id, session_id)
);
-- TODO:
CREATE TABLE merge_requests (
id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
title TEXT,
repo_id INTEGER NOT NULL REFERENCES repos(id) ON DELETE CASCADE,
creator INTEGER REFERENCES users(id) ON DELETE SET NULL,
source_ref TEXT NOT NULL,
destination_branch TEXT,
status TEXT NOT NULL CHECK (status IN ('open', 'merged', 'closed')),
UNIQUE (repo_id, source_ref, destination_branch),
UNIQUE (repo_id, id)
);
CREATE TABLE user_group_roles (
group_id INTEGER NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
PRIMARY KEY(user_id, group_id)
);
package main
import (
"errors"
"fmt"
"os"
"os/exec"
glider_ssh "github.com/gliderlabs/ssh"
"go.lindenii.runxiyu.org/lindenii-common/cmap"
)
type pack_to_hook_t struct {
session glider_ssh.Session
pubkey string
direct_access bool
repo_path string
user_id int
repo_id int
}
var pack_to_hook_by_cookie = cmap.Map[string, pack_to_hook_t]{}
// ssh_handle_receive_pack handles attempts to push to repos.
func ssh_handle_receive_pack(session glider_ssh.Session, pubkey string, repo_identifier string) (err error) {
repo_id, repo_path, direct_access, contrib_requirements, user_type, user_id, err := get_repo_path_perms_from_ssh_path_pubkey(session.Context(), repo_identifier, pubkey)
if err != nil {
return err
}
if !direct_access {
switch contrib_requirements {
case "closed":
return errors.New("You need direct access to push to this repo.")
case "registered_user":
if user_type != "registered" {
return errors.New("You need to be a registered user to push to this repo.")
}
case "ssh_pubkey":
if pubkey == "" {
return errors.New("You need to have an SSH public key to push to this repo.")
}
if user_type == "" {
user_id, err = add_user_ssh(session.Context(), pubkey)
if err != nil {
return err
}
fmt.Fprintln(session.Stderr(), "You are now registered as user ID", user_id)
}
case "public":
default:
panic("unknown contrib_requirements value " + contrib_requirements)
}
}
cookie, err := random_urlsafe_string(16)
if err != nil {
fmt.Fprintln(session.Stderr(), "Error while generating cookie:", err)
return err
}
pack_to_hook_by_cookie.Store(cookie, pack_to_hook_t{
session: session,
pubkey: pubkey,
direct_access: direct_access,
repo_path: repo_path,
user_id: user_id,
repo_id: repo_id,
})
defer pack_to_hook_by_cookie.Delete(cookie)
// The deferred Delete won't execute until proc.Wait returns, unless
// something goes horribly wrong, such as a panic.
proc := exec.CommandContext(session.Context(), "git-receive-pack", repo_path)
proc.Env = append(os.Environ(),
"LINDENII_FORGE_HOOKS_SOCKET_PATH="+config.Hooks.Socket,
"LINDENII_FORGE_HOOKS_COOKIE="+cookie,
)
proc.Stdin = session
proc.Stdout = session
proc.Stderr = session.Stderr()
err = proc.Start()
if err != nil {
fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
return err
}
err = proc.Wait()
if exitError, ok := err.(*exec.ExitError); ok {
fmt.Fprintln(session.Stderr(), "Process exited with error", exitError.ExitCode())
} else if err != nil {
fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
}
return err
}
package main
import (
"fmt"
"os"
"os/exec"
glider_ssh "github.com/gliderlabs/ssh"
)
// ssh_handle_upload_pack handles clones/fetches. It just uses git-upload-pack
// and has no ACL checks.
func ssh_handle_upload_pack(session glider_ssh.Session, pubkey string, repo_identifier string) (err error) {
_, repo_path, _, _, _, _, err := get_repo_path_perms_from_ssh_path_pubkey(session.Context(), repo_identifier, pubkey)
if err != nil {
return err
}
proc := exec.CommandContext(session.Context(), "git-upload-pack", repo_path)
proc.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+config.Hooks.Socket)
proc.Stdin = session
proc.Stdout = session
proc.Stderr = session.Stderr()
err = proc.Start()
if err != nil {
fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
return err
}
err = proc.Wait()
if exitError, ok := err.(*exec.ExitError); ok {
fmt.Fprintln(session.Stderr(), "Process exited with error", exitError.ExitCode())
} else if err != nil {
fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
}
return err
}
package main
import (
"context"
"errors"
"net/url"
"strings"
)
var err_ssh_illegal_endpoint = errors.New("illegal endpoint during SSH access")
func get_repo_path_perms_from_ssh_path_pubkey(ctx context.Context, ssh_path string, ssh_pubkey string) (repo_id int, repo_path string, direct_access bool, contrib_requirements string, user_type string, user_id int, err error) {
segments := strings.Split(strings.TrimPrefix(ssh_path, "/"), "/")
for i, segment := range segments {
var err error
segments[i], err = url.PathUnescape(segment)
if err != nil {
return 0, "", false, "", "", 0, err
}
}
if segments[0] == ":" {
return 0, "", false, "", "", 0, err_ssh_illegal_endpoint
}
separator_index := -1
for i, part := range segments {
if part == ":" {
separator_index = i
break
}
}
if segments[len(segments)-1] == "" {
segments = segments[:len(segments)-1]
}
switch {
case separator_index == -1:
return 0, "", false, "", "", 0, err_ssh_illegal_endpoint
case len(segments) <= separator_index+2:
return 0, "", false, "", "", 0, err_ssh_illegal_endpoint
}
group_name := segments[0]
module_type := segments[separator_index+1]
module_name := segments[separator_index+2]
switch module_type {
case "repos":
return get_path_perm_by_group_repo_key(ctx, group_name, module_name, ssh_pubkey)
default:
return 0, "", false, "", "", 0, err_ssh_illegal_endpoint
}
}
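For reference, the SSH path layout this parser expects is /<group>/:/repos/<repo>: the group name is the first segment, a literal ":" segment separates the group from the module, "repos" selects the module type, and the following segment is the repo name handed to get_path_perm_by_group_repo_key. With a hypothetical clone URL such as

ssh://forge.example.org/mygroup/:/repos/myrepo

the parser yields group_name = "mygroup", module_type = "repos", and module_name = "myrepo".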