Hi… I am well aware that this diff view is very suboptimal. It will be fixed when the refactored server comes along!
Some mass renaming
http {
web {
# What network transport should we listen on?
# Examples: tcp tcp4 tcp6 unix
net tcp
# What address to listen on?
# Examples for net tcp*: 127.0.0.1:8080 :80
# Example for unix: /var/run/lindenii/forge/http.sock
addr :8080
# How many seconds should cookies be remembered before they are purged?
cookie_expiry 604800
# What is the canonical URL of the web root?
root https://forge.example.org
# General HTTP server context timeout settings. It's recommended to
# set them slightly higher than usual as Git operations over large
# repos may take a long time.
read_timeout 120
write_timeout 1800
idle_timeout 120
# Are we running behind a reverse proxy? If so, we will trust
# X-Forwarded-For headers.
reverse_proxy true
}
irc {
tls true
net tcp
addr irc.runxiyu.org:6697
sendq 6000
nick forge-test
user forge
gecos "Lindenii Forge Test"
}
git {
# Where should newly-created Git repositories be stored?
repo_dir /var/lib/lindenii/forge/repos
# Where should git2d listen?
socket /var/run/lindenii/forge/git2d.sock
# Where should we put git2d?
daemon_path /usr/libexec/lindenii/forge/git2d
}
ssh {
# What network transport should we listen on?
# This should be "tcp" in almost all cases.
net tcp
# What address to listen on?
addr :22
# What is the path to the SSH host key? Generate it with ssh-keygen.
# The key must have an empty password.
key /etc/lindenii/ssh_host_ed25519_key
# What is the canonical SSH URL?
root ssh://forge.example.org
}
general {
title "Test Forge"
}
db {
# What type of database are we connecting to?
# Currently only "postgres" is supported.
type postgres
# What is the connection string?
conn postgresql:///lindenii-forge?host=/var/run/postgresql
}
hooks {
# On which UNIX domain socket should we listen for hook callbacks?
socket /var/run/lindenii/forge/hooks.sock
# Where should hook executables be put?
execs /usr/libexec/lindenii/forge/hooks
}
lmtp {
# On which UNIX domain socket should we listen for LMTP?
socket /var/run/lindenii/forge/lmtp.sock
# What's the maximum acceptable message size?
max_size 1000000
# What is our domainpart?
domain forge.example.org
# General timeouts
read_timeout 300
write_timeout 300
}
pprof {
# What network to listen on for pprof?
net tcp
# What address to listen on?
addr localhost:28471
}
version: "2"
linters:
  default: all
  disable:
    - depguard
    - err113 # dynamically defined errors are fine for our purposes
    - forcetypeassert # type assertion failures are usually programming errors
    - gochecknoinits # we use inits sparingly for good reasons
    - godox # they're just used as markers for where improvements are needed
    - ireturn # doesn't work well with how we use generics
    - lll # long lines are acceptable
    - mnd # it's a bit ridiculous to replace all of them
    - nakedret # patterns should be consistent
    - nonamedreturns # i like named returns
    - wrapcheck # wrapping all errors is just not necessary
    - varnamelen # "from" and "to" are very valid
    - containedctx
    - godot
    - dogsled
    - maintidx # e
    - nestif # e
    - gocognit # e
    - gocyclo # e
    - dupl # e
    - cyclop # e
    - goconst # e
    - funlen # e
    - wsl # e
    - nlreturn # e
    - unused # e
    - exhaustruct # e
linters-settings:
  revive:
    rules:
      - name: error-strings
        disabled: true
issues:
  max-issues-per-linter: 0
  max-same-issues: 0
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package ansiec

const (
	Black   = "\x1b[30m"
	Red     = "\x1b[31m"
	Green   = "\x1b[32m"
	Yellow  = "\x1b[33m"
	Blue    = "\x1b[34m"
	Magenta = "\x1b[35m"
	Cyan    = "\x1b[36m"
	White   = "\x1b[37m"
)

const (
	BrightBlack   = "\x1b[30;1m"
	BrightRed     = "\x1b[31;1m"
	BrightGreen   = "\x1b[32;1m"
	BrightYellow  = "\x1b[33;1m"
	BrightBlue    = "\x1b[34;1m"
	BrightMagenta = "\x1b[35;1m"
	BrightCyan    = "\x1b[36;1m"
	BrightWhite   = "\x1b[37;1m"
)
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package ansiec provides definitions for ANSI escape sequences.
package ansiec
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package ansiec

const Reset = "\x1b[0m"
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package ansiec

const (
	Bold      = "\x1b[1m"
	Underline = "\x1b[4m"
	Reversed  = "\x1b[7m"
	Italic    = "\x1b[3m"
)
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2018 Alex Edwards
// Package argon2id provides a wrapper around Go's golang.org/x/crypto/argon2.
package argon2id
import (
"crypto/rand"
"crypto/subtle"
"encoding/base64"
"errors"
"fmt"
"runtime"
"strings"
"golang.org/x/crypto/argon2"
)
var (
// ErrInvalidHash is returned by ComparePasswordAndHash if the provided
// hash isn't in the expected format.
ErrInvalidHash = errors.New("argon2id: hash is not in the correct format")
// ErrIncompatibleVariant is returned by ComparePasswordAndHash if the
// provided hash was created using an unsupported variant of Argon2.
// Currently only argon2id is supported by this package.
ErrIncompatibleVariant = errors.New("argon2id: incompatible variant of argon2")
// ErrIncompatibleVersion is returned by ComparePasswordAndHash if the
// provided hash was created using a different version of Argon2.
ErrIncompatibleVersion = errors.New("argon2id: incompatible version of argon2")
)
// DefaultParams provides some sane default parameters for hashing passwords.
//
// Follows recommendations given by the Argon2 RFC:
// "The Argon2id variant with t=1 and maximum available memory is RECOMMENDED as a
// default setting for all environments. This setting is secure against side-channel
// attacks and maximizes adversarial costs on dedicated bruteforce hardware."
//
// The default parameters should generally be used for development/testing purposes
// only. Custom parameters should be set for production applications depending on
// available memory/CPU resources and business requirements.
var DefaultParams = &Params{
Memory: 64 * 1024,
Iterations: 1,
Parallelism: uint8(runtime.NumCPU()),
SaltLength: 16,
KeyLength: 32,
}
// Params describes the input parameters used by the Argon2id algorithm. The
// Memory and Iterations parameters control the computational cost of hashing
// the password. The higher these figures are, the greater the cost of generating
// the hash and the longer the runtime. It also follows that the greater the cost
// will be for any attacker trying to guess the password. If the code is running
// on a machine with multiple cores, then you can decrease the runtime without
// reducing the cost by increasing the Parallelism parameter. This controls the
// number of threads that the work is spread across. Important note: Changing the
// value of the Parallelism parameter changes the hash output.
//
// For guidance and an outline process for choosing appropriate parameters see
// https://tools.ietf.org/html/draft-irtf-cfrg-argon2-04#section-4
type Params struct {
// The amount of memory used by the algorithm (in kibibytes).
Memory uint32
// The number of iterations over the memory.
Iterations uint32
// The number of threads (or lanes) used by the algorithm.
// Recommended value is between 1 and runtime.NumCPU().
Parallelism uint8
// Length of the random salt. 16 bytes is recommended for password hashing.
SaltLength uint32
// Length of the generated key. 16 bytes or more is recommended.
KeyLength uint32
}
// CreateHash returns an Argon2id hash of a plain-text password using the
// provided algorithm parameters. The returned hash follows the format used by
// the Argon2 reference C implementation and contains the base64-encoded Argon2id
// derived key, prefixed by the salt and parameters. It looks like this:
//
// $argon2id$v=19$m=65536,t=3,p=2$c29tZXNhbHQ$RdescudvJCsgt3ub+b+dWRWJTmaaJObG
func CreateHash(password string, params *Params) (hash string, err error) {
salt, err := generateRandomBytes(params.SaltLength)
if err != nil {
return "", err
}
key := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength)
b64Salt := base64.RawStdEncoding.EncodeToString(salt)
b64Key := base64.RawStdEncoding.EncodeToString(key)
hash = fmt.Sprintf("$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", argon2.Version, params.Memory, params.Iterations, params.Parallelism, b64Salt, b64Key)
return hash, nil
}
// ComparePasswordAndHash performs a constant-time comparison between a
// plain-text password and Argon2id hash, using the parameters and salt
// contained in the hash. It returns true if they match, otherwise it returns
// false.
func ComparePasswordAndHash(password, hash string) (match bool, err error) {
match, _, err = CheckHash(password, hash)
return match, err
}
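// Illustrative sketch (not part of the original file): typical caller code
// that hashes a password with DefaultParams and verifies it later. The
// function name and password literal are hypothetical.
func exampleHashAndVerify() error {
	hash, err := CreateHash("correct horse battery staple", DefaultParams)
	if err != nil {
		return fmt.Errorf("hash: %w", err)
	}
	match, err := ComparePasswordAndHash("correct horse battery staple", hash)
	if err != nil {
		return fmt.Errorf("compare: %w", err)
	}
	if !match {
		return errors.New("password does not match stored hash")
	}
	return nil
}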
// CheckHash is like ComparePasswordAndHash, except it also returns the params that the hash was
// created with. This can be useful if you want to update your hash params over time (which you
// should).
func CheckHash(password, hash string) (match bool, params *Params, err error) {
params, salt, key, err := DecodeHash(hash)
if err != nil {
return false, nil, err
}
otherKey := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Parallelism, params.KeyLength)
keyLen := int32(len(key))
otherKeyLen := int32(len(otherKey))
if subtle.ConstantTimeEq(keyLen, otherKeyLen) == 0 {
return false, params, nil
}
if subtle.ConstantTimeCompare(key, otherKey) == 1 {
return true, params, nil
}
return false, params, nil
}
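// Illustrative sketch (not part of the original file): using CheckHash to
// detect hashes created with outdated parameters and rehash them after a
// successful verification. currentParams and the store callback are
// hypothetical.
func exampleRehashIfOutdated(password, storedHash string, currentParams *Params, store func(string) error) (bool, error) {
	match, usedParams, err := CheckHash(password, storedHash)
	if err != nil || !match {
		return false, err
	}
	if usedParams.Memory != currentParams.Memory ||
		usedParams.Iterations != currentParams.Iterations ||
		usedParams.Parallelism != currentParams.Parallelism {
		newHash, err := CreateHash(password, currentParams)
		if err != nil {
			return true, err
		}
		return true, store(newHash)
	}
	return true, nil
}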
func generateRandomBytes(n uint32) ([]byte, error) {
b := make([]byte, n)
_, err := rand.Read(b)
if err != nil {
return nil, err
}
return b, nil
}
// DecodeHash expects a hash created from this package, and parses it to return the params used to
// create it, as well as the salt and key (password hash).
func DecodeHash(hash string) (params *Params, salt, key []byte, err error) {
vals := strings.Split(hash, "$")
if len(vals) != 6 {
return nil, nil, nil, ErrInvalidHash
}
if vals[1] != "argon2id" {
return nil, nil, nil, ErrIncompatibleVariant
}
var version int
_, err = fmt.Sscanf(vals[2], "v=%d", &version)
if err != nil {
return nil, nil, nil, err
}
if version != argon2.Version {
return nil, nil, nil, ErrIncompatibleVersion
}
params = &Params{}
_, err = fmt.Sscanf(vals[3], "m=%d,t=%d,p=%d", &params.Memory, &params.Iterations, &params.Parallelism)
if err != nil {
return nil, nil, nil, err
}
salt, err = base64.RawStdEncoding.Strict().DecodeString(vals[4])
if err != nil {
return nil, nil, nil, err
}
params.SaltLength = uint32(len(salt))
key, err = base64.RawStdEncoding.Strict().DecodeString(vals[5])
if err != nil {
return nil, nil, nil, err
}
params.KeyLength = uint32(len(key))
return params, salt, key, nil
}
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package bare provides primitives to encode and decode BARE messages.
//
// There is no guarantee that this is compatible with the upstream
// implementation at https://git.sr.ht/~sircmpwn/go-bare.
package bare
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"errors"
"fmt"
"reflect"
)
var ErrInvalidStr = errors.New("String contains invalid UTF-8 sequences")
type UnsupportedTypeError struct {
Type reflect.Type
}
func (e *UnsupportedTypeError) Error() string {
return fmt.Sprintf("Unsupported type for marshaling: %s\n", e.Type.String())
}
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"errors"
"io"
)
var (
maxUnmarshalBytes uint64 = 1024 * 1024 * 32 /* 32 MiB */
maxArrayLength uint64 = 1024 * 4 /* 4096 elements */
maxMapSize uint64 = 1024
)
// MaxUnmarshalBytes sets the maximum size of a message decoded by unmarshal.
// By default, this is set to 32 MiB.
func MaxUnmarshalBytes(bytes uint64) {
maxUnmarshalBytes = bytes
}
// MaxArrayLength sets the maximum number of elements in an array. Defaults to 4096 elements.
func MaxArrayLength(length uint64) {
maxArrayLength = length
}
// MaxMapSize sets the maximum size of a map. Defaults to 1024 key/value pairs.
func MaxMapSize(size uint64) {
maxMapSize = size
}
// Use MaxUnmarshalBytes to prevent this error from occurring on messages which
// are large by design.
var ErrLimitExceeded = errors.New("Maximum message size exceeded")
// Identical to io.LimitedReader, except it returns our custom error instead of
// EOF if the limit is reached.
type limitedReader struct {
R io.Reader
N uint64
}
func (l *limitedReader) Read(p []byte) (n int, err error) {
if l.N <= 0 {
return 0, ErrLimitExceeded
}
if uint64(len(p)) > l.N {
p = p[0:l.N]
}
n, err = l.R.Read(p)
l.N -= uint64(n)
return
}
func newLimitedReader(r io.Reader) *limitedReader {
return &limitedReader{r, maxUnmarshalBytes}
}
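// Illustrative sketch (not part of the original file): the limit setters above
// are package-level knobs that should be adjusted before decoding when
// messages are legitimately larger than the defaults. The values below are
// arbitrary.
func exampleRaiseLimits() {
	MaxUnmarshalBytes(64 * 1024 * 1024) // allow messages up to 64 MiB
	MaxArrayLength(16 * 1024)           // allow arrays of up to 16384 elements
	MaxMapSize(8 * 1024)                // allow maps of up to 8192 entries
}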
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"bytes"
"errors"
"fmt"
"reflect"
"sync"
)
// A type which implements this interface will be responsible for marshaling
// itself when encountered.
type Marshalable interface {
Marshal(w *Writer) error
}
var encoderBufferPool = sync.Pool{
New: func() interface{} {
buf := &bytes.Buffer{}
buf.Grow(32)
return buf
},
}
// Marshals a value (val, which must be a pointer) into a BARE message.
//
// The encoding of each struct field can be customized by the format string
// stored under the "bare" key in the struct field's tag.
//
// As a special case, if the field tag is "-", the field is always omitted.
func Marshal(val interface{}) ([]byte, error) {
// reuse buffers from previous serializations
b := encoderBufferPool.Get().(*bytes.Buffer)
defer func() {
b.Reset()
encoderBufferPool.Put(b)
}()
w := NewWriter(b)
err := MarshalWriter(w, val)
msg := make([]byte, b.Len())
copy(msg, b.Bytes())
return msg, err
}
// Marshals a value (val, which must be a pointer) into a BARE message and
// writes it to a Writer. See Marshal for details.
func MarshalWriter(w *Writer, val interface{}) error {
t := reflect.TypeOf(val)
v := reflect.ValueOf(val)
if t.Kind() != reflect.Ptr {
return errors.New("Expected val to be pointer type")
}
return getEncoder(t.Elem())(w, v.Elem())
}
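// Illustrative sketch (not part of the original file): a struct marshaled and
// unmarshaled with the "bare" field tag described above. The type and field
// names are hypothetical; the "-" tag keeps Secret out of the encoded message.
type examplePerson struct {
	Name   string
	Age    uint
	Secret string `bare:"-"`
}

func exampleRoundTrip() error {
	in := examplePerson{Name: "Runxi", Age: 30, Secret: "not encoded"}
	msg, err := Marshal(&in)
	if err != nil {
		return err
	}
	var out examplePerson
	return Unmarshal(msg, &out) // out.Secret remains the zero value
}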
type encodeFunc func(w *Writer, v reflect.Value) error
var encodeFuncCache sync.Map // map[reflect.Type]encodeFunc
// get encoder from cache
func getEncoder(t reflect.Type) encodeFunc {
if f, ok := encodeFuncCache.Load(t); ok {
return f.(encodeFunc)
}
f := encoderFunc(t)
encodeFuncCache.Store(t, f)
return f
}
var marshalableInterface = reflect.TypeOf((*Marshalable)(nil)).Elem()
func encoderFunc(t reflect.Type) encodeFunc {
if reflect.PointerTo(t).Implements(marshalableInterface) {
return func(w *Writer, v reflect.Value) error {
uv := v.Addr().Interface().(Marshalable)
return uv.Marshal(w)
}
}
if t.Kind() == reflect.Interface && t.Implements(unionInterface) {
return encodeUnion(t)
}
switch t.Kind() {
case reflect.Ptr:
return encodeOptional(t.Elem())
case reflect.Struct:
return encodeStruct(t)
case reflect.Array:
return encodeArray(t)
case reflect.Slice:
return encodeSlice(t)
case reflect.Map:
return encodeMap(t)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return encodeUint
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return encodeInt
case reflect.Float32, reflect.Float64:
return encodeFloat
case reflect.Bool:
return encodeBool
case reflect.String:
return encodeString
}
return func(w *Writer, v reflect.Value) error {
return &UnsupportedTypeError{v.Type()}
}
}
func encodeOptional(t reflect.Type) encodeFunc {
return func(w *Writer, v reflect.Value) error {
if v.IsNil() {
return w.WriteBool(false)
}
if err := w.WriteBool(true); err != nil {
return err
}
return getEncoder(t)(w, v.Elem())
}
}
func encodeStruct(t reflect.Type) encodeFunc {
n := t.NumField()
encoders := make([]encodeFunc, n)
for i := 0; i < n; i++ {
field := t.Field(i)
if field.Tag.Get("bare") == "-" {
continue
}
encoders[i] = getEncoder(field.Type)
}
return func(w *Writer, v reflect.Value) error {
for i := 0; i < n; i++ {
if encoders[i] == nil {
continue
}
err := encoders[i](w, v.Field(i))
if err != nil {
return err
}
}
return nil
}
}
func encodeArray(t reflect.Type) encodeFunc {
f := getEncoder(t.Elem())
len := t.Len()
return func(w *Writer, v reflect.Value) error {
for i := 0; i < len; i++ {
if err := f(w, v.Index(i)); err != nil {
return err
}
}
return nil
}
}
func encodeSlice(t reflect.Type) encodeFunc {
elem := t.Elem()
f := getEncoder(elem)
return func(w *Writer, v reflect.Value) error {
if err := w.WriteUint(uint64(v.Len())); err != nil {
return err
}
for i := 0; i < v.Len(); i++ {
if err := f(w, v.Index(i)); err != nil {
return err
}
}
return nil
}
}
func encodeMap(t reflect.Type) encodeFunc {
keyType := t.Key()
keyf := getEncoder(keyType)
valueType := t.Elem()
valf := getEncoder(valueType)
return func(w *Writer, v reflect.Value) error {
if err := w.WriteUint(uint64(v.Len())); err != nil {
return err
}
iter := v.MapRange()
for iter.Next() {
if err := keyf(w, iter.Key()); err != nil {
return err
}
if err := valf(w, iter.Value()); err != nil {
return err
}
}
return nil
}
}
func encodeUnion(t reflect.Type) encodeFunc {
ut, ok := unionRegistry[t]
if !ok {
return func(w *Writer, v reflect.Value) error {
return fmt.Errorf("Union type %s is not registered", t.Name())
}
}
encoders := make(map[uint64]encodeFunc)
for tag, t := range ut.types {
encoders[tag] = getEncoder(t)
}
return func(w *Writer, v reflect.Value) error {
t := v.Elem().Type()
if t.Kind() == reflect.Ptr {
// If T is a valid union value type, *T is valid too.
t = t.Elem()
v = v.Elem()
}
tag, ok := ut.tags[t]
if !ok {
return fmt.Errorf("Invalid union value: %s", v.Elem().String())
}
if err := w.WriteUint(tag); err != nil {
return err
}
return encoders[tag](w, v.Elem())
}
}
func encodeUint(w *Writer, v reflect.Value) error {
switch getIntKind(v.Type()) {
case reflect.Uint:
return w.WriteUint(v.Uint())
case reflect.Uint8:
return w.WriteU8(uint8(v.Uint()))
case reflect.Uint16:
return w.WriteU16(uint16(v.Uint()))
case reflect.Uint32:
return w.WriteU32(uint32(v.Uint()))
case reflect.Uint64:
return w.WriteU64(uint64(v.Uint()))
}
panic("not uint")
}
func encodeInt(w *Writer, v reflect.Value) error {
switch getIntKind(v.Type()) {
case reflect.Int:
return w.WriteInt(v.Int())
case reflect.Int8:
return w.WriteI8(int8(v.Int()))
case reflect.Int16:
return w.WriteI16(int16(v.Int()))
case reflect.Int32:
return w.WriteI32(int32(v.Int()))
case reflect.Int64:
return w.WriteI64(int64(v.Int()))
}
panic("not int")
}
func encodeFloat(w *Writer, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Float32:
return w.WriteF32(float32(v.Float()))
case reflect.Float64:
return w.WriteF64(v.Float())
}
panic("not float")
}
func encodeBool(w *Writer, v reflect.Value) error {
return w.WriteBool(v.Bool())
}
func encodeString(w *Writer, v reflect.Value) error {
if v.Kind() != reflect.String {
panic("not string")
}
return w.WriteString(v.String())
}
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"encoding/binary"
"fmt"
"io"
"math"
"unicode/utf8"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
type byteReader interface {
io.Reader
io.ByteReader
}
// A Reader for BARE primitive types.
type Reader struct {
base byteReader
scratch [8]byte
}
type simpleByteReader struct {
io.Reader
scratch [1]byte
}
func (r simpleByteReader) ReadByte() (byte, error) {
// reading into the fixed scratch array avoids allocating a buffer per call
_, err := r.Read(r.scratch[:])
return r.scratch[0], err
}
// Returns a new BARE primitive reader wrapping the given io.Reader.
func NewReader(base io.Reader) *Reader {
br, ok := base.(byteReader)
if !ok {
br = simpleByteReader{Reader: base}
}
return &Reader{base: br}
}
func (r *Reader) ReadUint() (uint64, error) {
x, err := binary.ReadUvarint(r.base)
if err != nil {
return x, err
}
return x, nil
}
func (r *Reader) ReadU8() (uint8, error) {
return r.base.ReadByte()
}
func (r *Reader) ReadU16() (uint16, error) {
var i uint16
if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil {
return i, err
}
return binary.LittleEndian.Uint16(r.scratch[:]), nil
}
func (r *Reader) ReadU32() (uint32, error) {
var i uint32
if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil {
return i, err
}
return binary.LittleEndian.Uint32(r.scratch[:]), nil
}
func (r *Reader) ReadU64() (uint64, error) {
var i uint64
if _, err := io.ReadAtLeast(r.base, r.scratch[:8], 8); err != nil {
return i, err
}
return binary.LittleEndian.Uint64(r.scratch[:]), nil
}
func (r *Reader) ReadInt() (int64, error) {
return binary.ReadVarint(r.base)
}
func (r *Reader) ReadI8() (int8, error) {
b, err := r.base.ReadByte()
return int8(b), err
}
func (r *Reader) ReadI16() (int16, error) {
var i int16
if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil {
return i, err
}
return int16(binary.LittleEndian.Uint16(r.scratch[:])), nil
}
func (r *Reader) ReadI32() (int32, error) {
var i int32
if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil {
return i, err
}
return int32(binary.LittleEndian.Uint32(r.scratch[:])), nil
}
func (r *Reader) ReadI64() (int64, error) {
var i int64
if _, err := io.ReadAtLeast(r.base, r.scratch[:], 8); err != nil {
return i, err
}
return int64(binary.LittleEndian.Uint64(r.scratch[:])), nil
}
func (r *Reader) ReadF32() (float32, error) {
u, err := r.ReadU32()
f := math.Float32frombits(u)
if math.IsNaN(float64(f)) {
return 0.0, fmt.Errorf("NaN is not permitted in BARE floats")
}
return f, err
}
func (r *Reader) ReadF64() (float64, error) {
u, err := r.ReadU64()
f := math.Float64frombits(u)
if math.IsNaN(f) {
return 0.0, fmt.Errorf("NaN is not permitted in BARE floats")
}
return f, err
}
func (r *Reader) ReadBool() (bool, error) {
b, err := r.ReadU8()
if err != nil {
return false, err
}
if b > 1 {
return false, fmt.Errorf("Invalid bool value: %#x", b)
}
return b == 1, nil
}
func (r *Reader) ReadString() (string, error) {
buf, err := r.ReadData()
if err != nil {
return "", err
}
if !utf8.Valid(buf) {
return "", ErrInvalidStr
}
return misc.BytesToString(buf), nil
}
// Reads a fixed amount of arbitrary data, defined by the length of the slice.
func (r *Reader) ReadDataFixed(dest []byte) error {
var amt int = 0
for amt < len(dest) {
n, err := r.base.Read(dest[amt:])
if err != nil {
return err
}
amt += n
}
return nil
}
// Reads arbitrary data whose length is read from the message.
func (r *Reader) ReadData() ([]byte, error) {
l, err := r.ReadUint()
if err != nil {
return nil, err
}
if l >= maxUnmarshalBytes {
return nil, ErrLimitExceeded
}
buf := make([]byte, l)
var amt uint64 = 0
for amt < l {
n, err := r.base.Read(buf[amt:])
if err != nil {
return nil, err
}
amt += uint64(n)
}
return buf, nil
}
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"fmt"
"reflect"
)
// Any type which is a union member must implement this interface. You must
// also call RegisterUnion for go-bare to marshal or unmarshal messages which
// utilize your union type.
type Union interface {
IsUnion()
}
type UnionTags struct {
iface reflect.Type
tags map[reflect.Type]uint64
types map[uint64]reflect.Type
}
var unionInterface = reflect.TypeOf((*Union)(nil)).Elem()
var unionRegistry map[reflect.Type]*UnionTags
func init() {
unionRegistry = make(map[reflect.Type]*UnionTags)
}
// Registers a union type in this context. Pass a pointer to the union
// interface, then register the types associated with it via Member, sorted
// ascending by their union tag.
func RegisterUnion(iface interface{}) *UnionTags {
ity := reflect.TypeOf(iface).Elem()
if _, ok := unionRegistry[ity]; ok {
panic(fmt.Errorf("Type %s has already been registered", ity.Name()))
}
if !ity.Implements(reflect.TypeOf((*Union)(nil)).Elem()) {
panic(fmt.Errorf("Type %s does not implement bare.Union", ity.Name()))
}
utypes := &UnionTags{
iface: ity,
tags: make(map[reflect.Type]uint64),
types: make(map[uint64]reflect.Type),
}
unionRegistry[ity] = utypes
return utypes
}
func (ut *UnionTags) Member(t interface{}, tag uint64) *UnionTags {
ty := reflect.TypeOf(t)
if !ty.AssignableTo(ut.iface) {
panic(fmt.Errorf("Type %s does not implement interface %s",
ty.Name(), ut.iface.Name()))
}
if _, ok := ut.tags[ty]; ok {
panic(fmt.Errorf("Type %s is already registered for union %s",
ty.Name(), ut.iface.Name()))
}
if _, ok := ut.types[tag]; ok {
panic(fmt.Errorf("Tag %d is already registered for union %s",
tag, ut.iface.Name()))
}
ut.tags[ty] = tag
ut.types[tag] = ty
return ut
}
func (ut *UnionTags) TagFor(v interface{}) (uint64, bool) {
tag, ok := ut.tags[reflect.TypeOf(v)]
return tag, ok
}
func (ut *UnionTags) TypeFor(tag uint64) (reflect.Type, bool) {
t, ok := ut.types[tag]
return t, ok
}
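// Illustrative sketch (not part of the original file): registering a union
// interface and its member types with tags. The message types here are
// hypothetical; each member implements IsUnion so it satisfies bare.Union.
type exampleAdd struct{ Value uint64 }
type exampleDelete struct{ Value uint64 }

func (exampleAdd) IsUnion()    {}
func (exampleDelete) IsUnion() {}

// exampleOp is the hypothetical union interface that member structs satisfy.
type exampleOp interface{ Union }

func exampleRegisterOps() {
	RegisterUnion((*exampleOp)(nil)).
		Member(exampleAdd{}, 0).
		Member(exampleDelete{}, 1)
}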
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"sync"
)
// A type which implements this interface will be responsible for unmarshaling
// itself when encountered.
type Unmarshalable interface {
Unmarshal(r *Reader) error
}
// Unmarshals a BARE message into val, which must be a pointer to a value of
// the message type.
func Unmarshal(data []byte, val interface{}) error {
b := bytes.NewReader(data)
r := NewReader(b)
return UnmarshalBareReader(r, val)
}
// Unmarshals a BARE message into value (val, which must be a pointer), from a
// reader. See Unmarshal for details.
func UnmarshalReader(r io.Reader, val interface{}) error {
r = newLimitedReader(r)
return UnmarshalBareReader(NewReader(r), val)
}
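// Illustrative sketch (not part of the original file): decoding a value
// directly from a stream, such as a network connection, with UnmarshalReader,
// which applies the configured unmarshal size limit. The conn parameter is
// hypothetical.
func exampleDecodeFromStream(conn io.Reader) (uint64, error) {
	var count uint64
	err := UnmarshalReader(conn, &count)
	return count, err
}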
type decodeFunc func(r *Reader, v reflect.Value) error
var decodeFuncCache sync.Map // map[reflect.Type]decodeFunc
func UnmarshalBareReader(r *Reader, val interface{}) error {
t := reflect.TypeOf(val)
v := reflect.ValueOf(val)
if t.Kind() != reflect.Ptr {
return errors.New("Expected val to be pointer type")
}
return getDecoder(t.Elem())(r, v.Elem())
}
// get decoder from cache
func getDecoder(t reflect.Type) decodeFunc {
if f, ok := decodeFuncCache.Load(t); ok {
return f.(decodeFunc)
}
f := decoderFunc(t)
decodeFuncCache.Store(t, f)
return f
}
var unmarshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem()
func decoderFunc(t reflect.Type) decodeFunc {
if reflect.PointerTo(t).Implements(unmarshalableInterface) {
return func(r *Reader, v reflect.Value) error {
uv := v.Addr().Interface().(Unmarshalable)
return uv.Unmarshal(r)
}
}
if t.Kind() == reflect.Interface && t.Implements(unionInterface) {
return decodeUnion(t)
}
switch t.Kind() {
case reflect.Ptr:
return decodeOptional(t.Elem())
case reflect.Struct:
return decodeStruct(t)
case reflect.Array:
return decodeArray(t)
case reflect.Slice:
return decodeSlice(t)
case reflect.Map:
return decodeMap(t)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return decodeUint
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return decodeInt
case reflect.Float32, reflect.Float64:
return decodeFloat
case reflect.Bool:
return decodeBool
case reflect.String:
return decodeString
}
return func(r *Reader, v reflect.Value) error {
return &UnsupportedTypeError{v.Type()}
}
}
func decodeOptional(t reflect.Type) decodeFunc {
return func(r *Reader, v reflect.Value) error {
s, err := r.ReadU8()
if err != nil {
return err
}
if s > 1 {
return fmt.Errorf("Invalid optional value: %#x", s)
}
if s == 0 {
return nil
}
v.Set(reflect.New(t))
return getDecoder(t)(r, v.Elem())
}
}
func decodeStruct(t reflect.Type) decodeFunc {
n := t.NumField()
decoders := make([]decodeFunc, n)
for i := 0; i < n; i++ {
field := t.Field(i)
if field.Tag.Get("bare") == "-" {
continue
}
decoders[i] = getDecoder(field.Type)
}
return func(r *Reader, v reflect.Value) error {
for i := 0; i < n; i++ {
if decoders[i] == nil {
continue
}
err := decoders[i](r, v.Field(i))
if err != nil {
return err
}
}
return nil
}
}
func decodeArray(t reflect.Type) decodeFunc {
f := getDecoder(t.Elem())
len := t.Len()
return func(r *Reader, v reflect.Value) error {
for i := 0; i < len; i++ {
err := f(r, v.Index(i))
if err != nil {
return err
}
}
return nil
}
}
func decodeSlice(t reflect.Type) decodeFunc {
elem := t.Elem()
f := getDecoder(elem)
return func(r *Reader, v reflect.Value) error {
len, err := r.ReadUint()
if err != nil {
return err
}
if len > maxArrayLength {
return fmt.Errorf("Array length %d exceeds configured limit of %d", len, maxArrayLength)
}
v.Set(reflect.MakeSlice(t, int(len), int(len)))
for i := 0; i < int(len); i++ {
if err := f(r, v.Index(i)); err != nil {
return err
}
}
return nil
}
}
func decodeMap(t reflect.Type) decodeFunc {
keyType := t.Key()
keyf := getDecoder(keyType)
valueType := t.Elem()
valf := getDecoder(valueType)
return func(r *Reader, v reflect.Value) error {
size, err := r.ReadUint()
if err != nil {
return err
}
if size > maxMapSize {
return fmt.Errorf("Map size %d exceeds configured limit of %d", size, maxMapSize)
}
v.Set(reflect.MakeMapWithSize(t, int(size)))
key := reflect.New(keyType).Elem()
value := reflect.New(valueType).Elem()
for i := uint64(0); i < size; i++ {
if err := keyf(r, key); err != nil {
return err
}
if v.MapIndex(key).Kind() > reflect.Invalid {
return fmt.Errorf("Encountered duplicate map key: %v", key.Interface())
}
if err := valf(r, value); err != nil {
return err
}
v.SetMapIndex(key, value)
}
return nil
}
}
func decodeUnion(t reflect.Type) decodeFunc {
ut, ok := unionRegistry[t]
if !ok {
return func(r *Reader, v reflect.Value) error {
return fmt.Errorf("Union type %s is not registered", t.Name())
}
}
decoders := make(map[uint64]decodeFunc)
for tag, t := range ut.types {
t := t
f := getDecoder(t)
decoders[tag] = func(r *Reader, v reflect.Value) error {
nv := reflect.New(t)
if err := f(r, nv.Elem()); err != nil {
return err
}
v.Set(nv)
return nil
}
}
return func(r *Reader, v reflect.Value) error {
tag, err := r.ReadUint()
if err != nil {
return err
}
if f, ok := decoders[tag]; ok {
return f(r, v)
}
return fmt.Errorf("Invalid union tag %d for type %s", tag, t.Name())
}
}
func decodeUint(r *Reader, v reflect.Value) error {
var err error
switch getIntKind(v.Type()) {
case reflect.Uint:
var u uint64
u, err = r.ReadUint()
v.SetUint(u)
case reflect.Uint8:
var u uint8
u, err = r.ReadU8()
v.SetUint(uint64(u))
case reflect.Uint16:
var u uint16
u, err = r.ReadU16()
v.SetUint(uint64(u))
case reflect.Uint32:
var u uint32
u, err = r.ReadU32()
v.SetUint(uint64(u))
case reflect.Uint64:
var u uint64
u, err = r.ReadU64()
v.SetUint(uint64(u))
default:
panic("not an uint")
}
return err
}
func decodeInt(r *Reader, v reflect.Value) error {
var err error
switch getIntKind(v.Type()) {
case reflect.Int:
var i int64
i, err = r.ReadInt()
v.SetInt(i)
case reflect.Int8:
var i int8
i, err = r.ReadI8()
v.SetInt(int64(i))
case reflect.Int16:
var i int16
i, err = r.ReadI16()
v.SetInt(int64(i))
case reflect.Int32:
var i int32
i, err = r.ReadI32()
v.SetInt(int64(i))
case reflect.Int64:
var i int64
i, err = r.ReadI64()
v.SetInt(int64(i))
default:
panic("not an int")
}
return err
}
func decodeFloat(r *Reader, v reflect.Value) error {
var err error
switch v.Type().Kind() {
case reflect.Float32:
var f float32
f, err = r.ReadF32()
v.SetFloat(float64(f))
case reflect.Float64:
var f float64
f, err = r.ReadF64()
v.SetFloat(f)
default:
panic("not a float")
}
return err
}
func decodeBool(r *Reader, v reflect.Value) error {
b, err := r.ReadBool()
v.SetBool(b)
return err
}
func decodeString(r *Reader, v reflect.Value) error {
s, err := r.ReadString()
v.SetString(s)
return err
}
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"reflect"
)
// Int is a variable-length encoded signed integer.
type Int int64
// Uint is a variable-length encoded unsigned integer.
type Uint uint64
var (
intType = reflect.TypeOf(Int(0))
uintType = reflect.TypeOf(Uint(0))
)
func getIntKind(t reflect.Type) reflect.Kind {
switch t {
case intType:
return reflect.Int
case uintType:
return reflect.Uint
default:
return t.Kind()
}
}
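// Illustrative sketch (not part of the original file): Int and Uint select
// variable-length encoding for struct fields, while the fixed-width Go types
// keep their usual sizes. The struct below is hypothetical.
type exampleCounters struct {
	Small Uint   // varint: one byte on the wire for values below 128
	Fixed uint64 // always eight bytes, little-endian
}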
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright (c) 2025 Drew Devault <https://drewdevault.com>
package bare
import (
"encoding/binary"
"fmt"
"io"
"math"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
// A Writer for BARE primitive types.
type Writer struct {
base io.Writer
scratch [binary.MaxVarintLen64]byte
}
// Returns a new BARE primitive writer wrapping the given io.Writer.
func NewWriter(base io.Writer) *Writer {
return &Writer{base: base}
}
func (w *Writer) WriteUint(i uint64) error {
n := binary.PutUvarint(w.scratch[:], i)
_, err := w.base.Write(w.scratch[:n])
return err
}
func (w *Writer) WriteU8(i uint8) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteU16(i uint16) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteU32(i uint32) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteU64(i uint64) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteInt(i int64) error {
var buf [binary.MaxVarintLen64]byte
n := binary.PutVarint(buf[:], i)
_, err := w.base.Write(buf[:n])
return err
}
func (w *Writer) WriteI8(i int8) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteI16(i int16) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteI32(i int32) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteI64(i int64) error {
return binary.Write(w.base, binary.LittleEndian, i)
}
func (w *Writer) WriteF32(f float32) error {
if math.IsNaN(float64(f)) {
return fmt.Errorf("NaN is not permitted in BARE floats")
}
return binary.Write(w.base, binary.LittleEndian, f)
}
func (w *Writer) WriteF64(f float64) error {
if math.IsNaN(f) {
return fmt.Errorf("NaN is not permitted in BARE floats")
}
return binary.Write(w.base, binary.LittleEndian, f)
}
func (w *Writer) WriteBool(b bool) error {
return binary.Write(w.base, binary.LittleEndian, b)
}
func (w *Writer) WriteString(str string) error {
return w.WriteData(misc.StringToBytes(str))
}
// Writes a fixed amount of arbitrary data, defined by the length of the slice.
func (w *Writer) WriteDataFixed(data []byte) error {
var amt int = 0
for amt < len(data) {
n, err := w.base.Write(data[amt:])
if err != nil {
return err
}
amt += n
}
return nil
}
// Writes arbitrary data whose length is encoded into the message.
func (w *Writer) WriteData(data []byte) error {
err := w.WriteUint(uint64(len(data)))
if err != nil {
return err
}
var amt int = 0
for amt < len(data) {
n, err := w.base.Write(data[amt:])
if err != nil {
return err
}
amt += n
}
return nil
}
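// Illustrative sketch (not part of the original file): writing BARE primitives
// with a Writer and reading them back with a Reader. In practice w and r would
// wrap the same buffer or connection; the function name is hypothetical.
func examplePrimitiveRoundTrip(w io.Writer, r io.Reader) error {
	bw := NewWriter(w)
	if err := bw.WriteUint(42); err != nil {
		return err
	}
	if err := bw.WriteString("hello"); err != nil {
		return err
	}
	br := NewReader(r)
	if _, err := br.ReadUint(); err != nil {
		return err
	}
	_, err := br.ReadString()
	return err
}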
// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically
// written from scratch with Go 1.23's sync.Map.
// Copyright 2024 Runxi Yu (porting it to generics)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.BSD file.
package cmap
import (
"sync"
"sync/atomic"
"unsafe"
)
// ComparableMap[K comparable, V comparable] is like a Go map[K]V but is safe for concurrent use
// by multiple goroutines without additional locking or coordination. Loads,
// stores, and deletes run in amortized constant time.
//
// The ComparableMap type is optimized for two common use cases: (1) when the comparableEntry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a ComparableMap may significantly reduce lock
// contention compared to a Go map paired with a separate [Mutex] or [RWMutex].
//
// The zero ComparableMap is empty and ready for use. A ComparableMap must not be copied after first use.
//
// In the terminology of [the Go memory model], ComparableMap arranges that a write operation
// “synchronizes before” any read operation that observes the effect of the write, where
// read and write operations are defined as follows.
// [ComparableMap.Load], [ComparableMap.LoadAndDelete], [ComparableMap.LoadOrStore], [ComparableMap.Swap], [ComparableMap.CompareAndSwap],
// and [ComparableMap.CompareAndDelete] are read operations;
// [ComparableMap.Delete], [ComparableMap.LoadAndDelete], [ComparableMap.Store], and [ComparableMap.Swap] are write operations;
// [ComparableMap.LoadOrStore] is a write operation when it returns loaded set to false;
// [ComparableMap.CompareAndSwap] is a write operation when it returns swapped set to true;
// and [ComparableMap.CompareAndDelete] is a write operation when it returns deleted set to true.
//
// [the Go memory model]: https://go.dev/ref/mem
type ComparableMap[K comparable, V comparable] struct {
mu sync.Mutex
// read contains the portion of the map's contents that are safe for
// concurrent access (with or without mu held).
//
// The read field itself is always safe to load, but must only be stored with
// mu held.
//
// Entries stored in read may be updated concurrently without mu, but updating
// a previously-comparableExpunged comparableEntry requires that the comparableEntry be copied to the dirty
// map and uncomparableExpunged with mu held.
read atomic.Pointer[comparableReadOnly[K, V]]
// dirty contains the portion of the map's contents that require mu to be
// held. To ensure that the dirty map can be promoted to the read map quickly,
// it also includes all of the non-comparableExpunged entries in the read map.
//
// Expunged entries are not stored in the dirty map. An comparableExpunged comparableEntry in the
// clean map must be uncomparableExpunged and added to the dirty map before a new value
// can be stored to it.
//
// If the dirty map is nil, the next write to the map will initialize it by
// making a shallow copy of the clean map, omitting stale entries.
dirty map[K]*comparableEntry[V]
// misses counts the number of loads since the read map was last updated that
// needed to lock mu to determine whether the key was present.
//
// Once enough misses have occurred to cover the cost of copying the dirty
// map, the dirty map will be promoted to the read map (in the unamended
// state) and the next store to the map will make a new dirty copy.
misses int
}
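// Illustrative sketch (not part of the original file): typical use of a
// ComparableMap as a concurrency-safe counter registry. The key and value
// types are hypothetical; V must be comparable so CompareAndSwap can compare
// stored values.
func exampleComparableMapUsage() {
	var hits ComparableMap[string, int]
	hits.Store("/", 1)
	if v, ok := hits.Load("/"); ok {
		// Retry-loop increment built on CompareAndSwap.
		for !hits.CompareAndSwap("/", v, v+1) {
			v, _ = hits.Load("/")
		}
	}
	hits.Range(func(key string, value int) bool {
		_, _ = key, value
		return true
	})
}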
// comparableReadOnly is an immutable struct stored atomically in the ComparableMap.read field.
type comparableReadOnly[K comparable, V comparable] struct {
m map[K]*comparableEntry[V]
amended bool // true if the dirty map contains some key not in m.
}
// comparableExpunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var comparableExpunged = unsafe.Pointer(new(any))
// An comparableEntry is a slot in the map corresponding to a particular key.
type comparableEntry[V comparable] struct {
// p points to the value stored for the comparableEntry.
//
// If p == nil, the comparableEntry has been deleted, and either m.dirty == nil or
// m.dirty[key] is e.
//
// If p == comparableExpunged, the comparableEntry has been deleted, m.dirty != nil, and the comparableEntry
// is missing from m.dirty.
//
// Otherwise, the comparableEntry is valid and recorded in m.read.m[key] and, if m.dirty
// != nil, in m.dirty[key].
//
// An comparableEntry can be deleted by atomic replacement with nil: when m.dirty is
// next created, it will atomically replace nil with comparableExpunged and leave
// m.dirty[key] unset.
//
// An comparableEntry's associated value can be updated by atomic replacement, provided
// p != comparableExpunged. If p == comparableExpunged, an comparableEntry's associated value can be updated
// only after first setting m.dirty[key] = e so that lookups using the dirty
// map find the comparableEntry.
p unsafe.Pointer
}
func newComparableEntry[V comparable](i V) *comparableEntry[V] {
return &comparableEntry[V]{p: unsafe.Pointer(&i)}
}
func (m *ComparableMap[K, V]) loadReadOnly() comparableReadOnly[K, V] {
if p := m.read.Load(); p != nil {
return *p
}
return comparableReadOnly[K, V]{}
}
// Load returns the value stored in the map for a key, or the zero value of V
// if no value is present.
// The ok result indicates whether value was found in the map.
func (m *ComparableMap[K, V]) Load(key K) (value V, ok bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
// Avoid reporting a spurious miss if m.dirty got promoted while we were
// blocked on m.mu. (If further loads of the same key will not miss, it's
// not worth copying the dirty map for this key.)
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
// Regardless of whether the comparableEntry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
if !ok {
return *new(V), false
}
return e.load()
}
func (e *comparableEntry[V]) load() (value V, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == nil || p == comparableExpunged {
return value, false
}
return *(*V)(p), true
}
// Store sets the value for a key.
func (m *ComparableMap[K, V]) Store(key K, value V) {
_, _ = m.Swap(key, value)
}
// Clear deletes all the entries, resulting in an empty ComparableMap.
func (m *ComparableMap[K, V]) Clear() {
read := m.loadReadOnly()
if len(read.m) == 0 && !read.amended {
// Avoid allocating a new comparableReadOnly when the map is already clear.
return
}
m.mu.Lock()
defer m.mu.Unlock()
read = m.loadReadOnly()
if len(read.m) > 0 || read.amended {
m.read.Store(&comparableReadOnly[K, V]{})
}
clear(m.dirty)
// Don't immediately promote the newly-cleared dirty map on the next operation.
m.misses = 0
}
// tryCompareAndSwap compares the comparableEntry with the given old value and swaps
// it with a new value if the comparableEntry is equal to the old value, and the comparableEntry
// has not been comparableExpunged.
//
// If the comparableEntry is comparableExpunged, tryCompareAndSwap returns false and leaves
// the comparableEntry unchanged.
func (e *comparableEntry[V]) tryCompareAndSwap(old V, new V) bool {
p := atomic.LoadPointer(&e.p)
if p == nil || p == comparableExpunged || *(*V)(p) != old { // XXX
return false
}
// Copy the pointer after the first load to make this method more amenable
// to escape analysis: if the comparison fails from the start, we shouldn't
// bother heap-allocating a pointer to store.
nc := new
for {
if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(&nc)) {
return true
}
p = atomic.LoadPointer(&e.p)
if p == nil || p == comparableExpunged || *(*V)(p) != old {
return false
}
}
}
// unexpungeLocked ensures that the comparableEntry is not marked as comparableExpunged.
//
// If the comparableEntry was previously comparableExpunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *comparableEntry[V]) unexpungeLocked() (wasExpunged bool) {
return atomic.CompareAndSwapPointer(&e.p, comparableExpunged, nil)
}
// swapLocked unconditionally swaps a value into the comparableEntry.
//
// The comparableEntry must be known not to be comparableExpunged.
func (e *comparableEntry[V]) swapLocked(i *V) *V {
return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i)))
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *ComparableMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
// Avoid locking if it's a clean hit.
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
actual, loaded, ok := e.tryLoadOrStore(value)
if ok {
return actual, loaded
}
}
m.mu.Lock()
read = m.loadReadOnly()
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
m.dirty[key] = e
}
actual, loaded, _ = e.tryLoadOrStore(value)
} else if e, ok := m.dirty[key]; ok {
actual, loaded, _ = e.tryLoadOrStore(value)
m.missLocked()
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true})
}
m.dirty[key] = newComparableEntry(value)
actual, loaded = value, false
}
m.mu.Unlock()
return actual, loaded
}
// tryLoadOrStore atomically loads or stores a value if the comparableEntry is not
// comparableExpunged.
//
// If the comparableEntry is comparableExpunged, tryLoadOrStore leaves the comparableEntry unchanged and
// returns with ok==false.
func (e *comparableEntry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == comparableExpunged {
return actual, false, false
}
if p != nil {
return *(*V)(p), true, true
}
// Copy the pointer after the first load to make this method more amenable
// to escape analysis: if we hit the "load" path or the comparableEntry is comparableExpunged, we
// shouldn't bother heap-allocating.
ic := i
for {
if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
return i, false, true
}
p = atomic.LoadPointer(&e.p)
if p == comparableExpunged {
return actual, false, false
}
if p != nil {
return *(*V)(p), true, true
}
}
}
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *ComparableMap[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
delete(m.dirty, key)
// Regardless of whether the comparableEntry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
if ok {
return e.delete()
}
return value, false
}
// Delete deletes the value for a key.
func (m *ComparableMap[K, V]) Delete(key K) {
m.LoadAndDelete(key)
}
func (e *comparableEntry[V]) delete() (value V, ok bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == nil || p == comparableExpunged {
return value, false
}
if atomic.CompareAndSwapPointer(&e.p, p, nil) {
return *(*V)(p), true
}
}
}
// trySwap swaps a value if the comparableEntry has not been comparableExpunged.
//
// If the comparableEntry is comparableExpunged, trySwap returns false and leaves the comparableEntry
// unchanged.
func (e *comparableEntry[V]) trySwap(i *V) (*V, bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == comparableExpunged {
return nil, false
}
if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
return (*V)(p), true
}
}
}
// Swap swaps the value for a key and returns the previous value if any.
// The loaded result reports whether the key was present.
func (m *ComparableMap[K, V]) Swap(key K, value V) (previous V, loaded bool) {
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
if v, ok := e.trySwap(&value); ok {
if v == nil {
return previous, false
}
return *v, true
}
}
m.mu.Lock()
read = m.loadReadOnly()
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
// The comparableEntry was previously comparableExpunged, which implies that there is a
// non-nil dirty map and this comparableEntry is not in it.
m.dirty[key] = e
}
if v := e.swapLocked(&value); v != nil {
loaded = true
previous = *v
}
} else if e, ok := m.dirty[key]; ok {
if v := e.swapLocked(&value); v != nil {
loaded = true
previous = *v
}
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(&comparableReadOnly[K, V]{m: read.m, amended: true})
}
m.dirty[key] = newComparableEntry(value)
}
m.mu.Unlock()
return previous, loaded
}
// CompareAndSwap swaps the old and new values for key
// if the value stored in the map is equal to old.
// The old value must be of a comparable type.
func (m *ComparableMap[K, V]) CompareAndSwap(key K, old, new V) (swapped bool) {
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
return e.tryCompareAndSwap(old, new)
} else if !read.amended {
return false // No existing value for key.
}
m.mu.Lock()
defer m.mu.Unlock()
read = m.loadReadOnly()
swapped = false
if e, ok := read.m[key]; ok {
swapped = e.tryCompareAndSwap(old, new)
} else if e, ok := m.dirty[key]; ok {
swapped = e.tryCompareAndSwap(old, new)
// We needed to lock mu in order to load the comparableEntry for key,
// and the operation didn't change the set of keys in the map
// (so it would be made more efficient by promoting the dirty
// map to read-only).
// Count it as a miss so that we will eventually switch to the
// more efficient steady state.
m.missLocked()
}
return swapped
}
// CompareAndDelete deletes the comparableEntry for key if its value is equal to old.
// The old value must be of a comparable type.
//
// If there is no current value for key in the map, CompareAndDelete
// returns false (even if the old value is a nil pointer).
func (m *ComparableMap[K, V]) CompareAndDelete(key K, old V) (deleted bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
// Don't delete key from m.dirty: we still need to do the “compare” part
// of the operation. The comparableEntry will eventually be comparableExpunged when the
// dirty map is promoted to the read map.
//
// Regardless of whether the comparableEntry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
for ok {
p := atomic.LoadPointer(&e.p)
if p == nil || p == comparableExpunged || *(*V)(p) != old {
return false
}
if atomic.CompareAndSwapPointer(&e.p, p, nil) {
return true
}
}
return false
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the ComparableMap's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *ComparableMap[K, V]) Range(f func(key K, value V) bool) {
// We need to be able to iterate over all of the keys that were already
// present at the start of the call to Range.
// If read.amended is false, then read.m satisfies that property without
// requiring us to hold m.mu for a long time.
read := m.loadReadOnly()
if read.amended {
// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
// (assuming the caller does not break out early), so a call to Range
// amortizes an entire copy of the map: we can promote the dirty copy
// immediately!
m.mu.Lock()
read = m.loadReadOnly()
if read.amended {
read = comparableReadOnly[K, V]{m: m.dirty}
copyRead := read
m.read.Store(&copyRead)
m.dirty = nil
m.misses = 0
}
m.mu.Unlock()
}
for k, e := range read.m {
v, ok := e.load()
if !ok {
continue
}
if !f(k, v) {
break
}
}
}
func (m *ComparableMap[K, V]) missLocked() {
m.misses++
if m.misses < len(m.dirty) {
return
}
m.read.Store(&comparableReadOnly[K, V]{m: m.dirty})
m.dirty = nil
m.misses = 0
}
func (m *ComparableMap[K, V]) dirtyLocked() {
if m.dirty != nil {
return
}
read := m.loadReadOnly()
m.dirty = make(map[K]*comparableEntry[V], len(read.m))
for k, e := range read.m {
if !e.tryExpungeLocked() {
m.dirty[k] = e
}
}
}
func (e *comparableEntry[V]) tryExpungeLocked() (isExpunged bool) {
p := atomic.LoadPointer(&e.p)
for p == nil {
if atomic.CompareAndSwapPointer(&e.p, nil, comparableExpunged) {
return true
}
p = atomic.LoadPointer(&e.p)
}
return p == comparableExpunged
}
// Inspired by github.com/SaveTheRbtz/generic-sync-map-go but technically
// written from scratch with Go 1.23's sync.Map.
// Copyright 2024 Runxi Yu (porting it to generics)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.BSD file.
// Package cmap provides a generic Map safe for concurrent use.
package cmap
import (
"sync"
"sync/atomic"
"unsafe"
)
// Map[K comparable, V any] is like a Go map[K]V but is safe for concurrent use
// by multiple goroutines without additional locking or coordination. Loads,
// stores, and deletes run in amortized constant time.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate [Mutex] or [RWMutex].
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
//
// In the terminology of [the Go memory model], Map arranges that a write operation
// “synchronizes before” any read operation that observes the effect of the write, where
// read and write operations are defined as follows.
// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap],
// and [Map.CompareAndDelete] are read operations;
// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations;
// [Map.LoadOrStore] is a write operation when it returns loaded set to false;
// [Map.CompareAndSwap] is a write operation when it returns swapped set to true;
// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true.
//
// [the Go memory model]: https://go.dev/ref/mem
type Map[K comparable, V any] struct {
mu sync.Mutex
// read contains the portion of the map's contents that are safe for
// concurrent access (with or without mu held).
//
// The read field itself is always safe to load, but must only be stored with
// mu held.
//
// Entries stored in read may be updated concurrently without mu, but updating
// a previously-expunged entry requires that the entry be copied to the dirty
// map and unexpunged with mu held.
read atomic.Pointer[readOnly[K, V]]
// dirty contains the portion of the map's contents that require mu to be
// held. To ensure that the dirty map can be promoted to the read map quickly,
// it also includes all of the non-expunged entries in the read map.
//
// Expunged entries are not stored in the dirty map. An expunged entry in the
// clean map must be unexpunged and added to the dirty map before a new value
// can be stored to it.
//
// If the dirty map is nil, the next write to the map will initialize it by
// making a shallow copy of the clean map, omitting stale entries.
dirty map[K]*entry[V]
// misses counts the number of loads since the read map was last updated that
// needed to lock mu to determine whether the key was present.
//
// Once enough misses have occurred to cover the cost of copying the dirty
// map, the dirty map will be promoted to the read map (in the unamended
// state) and the next store to the map will make a new dirty copy.
misses int
}
// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly[K comparable, V any] struct {
m map[K]*entry[V]
amended bool // true if the dirty map contains some key not in m.
}
// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(any))
// An entry is a slot in the map corresponding to a particular key.
type entry[V any] struct {
// p points to the value stored for the entry.
//
// If p == nil, the entry has been deleted, and either m.dirty == nil or
// m.dirty[key] is e.
//
// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
// is missing from m.dirty.
//
// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
// != nil, in m.dirty[key].
//
// An entry can be deleted by atomic replacement with nil: when m.dirty is
// next created, it will atomically replace nil with expunged and leave
// m.dirty[key] unset.
//
// An entry's associated value can be updated by atomic replacement, provided
// p != expunged. If p == expunged, an entry's associated value can be updated
// only after first setting m.dirty[key] = e so that lookups using the dirty
// map find the entry.
p unsafe.Pointer
}
func newEntry[V any](i V) *entry[V] {
return &entry[V]{p: unsafe.Pointer(&i)}
}
func (m *Map[K, V]) loadReadOnly() readOnly[K, V] {
if p := m.read.Load(); p != nil {
return *p
}
return readOnly[K, V]{}
}
// Load returns the value stored in the map for a key, or the zero value of V
// if no value is present.
// The ok result indicates whether value was found in the map.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
// Avoid reporting a spurious miss if m.dirty got promoted while we were
// blocked on m.mu. (If further loads of the same key will not miss, it's
// not worth copying the dirty map for this key.)
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
// Regardless of whether the entry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
if !ok {
return *new(V), false
}
return e.load()
}
func (e *entry[V]) load() (value V, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
return value, false
}
return *(*V)(p), true
}
// Store sets the value for a key.
func (m *Map[K, V]) Store(key K, value V) {
_, _ = m.Swap(key, value)
}
// Clear deletes all the entries, resulting in an empty Map.
func (m *Map[K, V]) Clear() {
read := m.loadReadOnly()
if len(read.m) == 0 && !read.amended {
// Avoid allocating a new readOnly when the map is already clear.
return
}
m.mu.Lock()
defer m.mu.Unlock()
read = m.loadReadOnly()
if len(read.m) > 0 || read.amended {
m.read.Store(&readOnly[K, V]{})
}
clear(m.dirty)
// Don't immediately promote the newly-cleared dirty map on the next operation.
m.misses = 0
}
// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry[V]) unexpungeLocked() (wasExpunged bool) {
return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}
// swapLocked unconditionally swaps a value into the entry.
//
// The entry must be known not to be expunged.
func (e *entry[V]) swapLocked(i *V) *V {
return (*V)(atomic.SwapPointer(&e.p, unsafe.Pointer(i)))
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
// Avoid locking if it's a clean hit.
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
actual, loaded, ok := e.tryLoadOrStore(value)
if ok {
return actual, loaded
}
}
m.mu.Lock()
read = m.loadReadOnly()
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
m.dirty[key] = e
}
actual, loaded, _ = e.tryLoadOrStore(value)
} else if e, ok := m.dirty[key]; ok {
actual, loaded, _ = e.tryLoadOrStore(value)
m.missLocked()
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(&readOnly[K, V]{m: read.m, amended: true})
}
m.dirty[key] = newEntry(value)
actual, loaded = value, false
}
m.mu.Unlock()
return actual, loaded
}
// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == expunged {
return actual, false, false
}
if p != nil {
return *(*V)(p), true, true
}
// Copy the pointer after the first load to make this method more amenable
// to escape analysis: if we hit the "load" path or the entry is expunged, we
// shouldn't bother heap-allocating.
ic := i
for {
if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
return i, false, true
}
p = atomic.LoadPointer(&e.p)
if p == expunged {
return actual, false, false
}
if p != nil {
return *(*V)(p), true, true
}
}
}
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
delete(m.dirty, key)
// Regardless of whether the entry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
if ok {
return e.delete()
}
return value, false
}
// Delete deletes the value for a key.
func (m *Map[K, V]) Delete(key K) {
m.LoadAndDelete(key)
}
func (e *entry[V]) delete() (value V, ok bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
return value, false
}
if atomic.CompareAndSwapPointer(&e.p, p, nil) {
return *(*V)(p), true
}
}
}
// trySwap swaps a value if the entry has not been expunged.
//
// If the entry is expunged, trySwap returns false and leaves the entry
// unchanged.
func (e *entry[V]) trySwap(i *V) (*V, bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == expunged {
return nil, false
}
if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
return (*V)(p), true
}
}
}
// Swap swaps the value for a key and returns the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) {
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
if v, ok := e.trySwap(&value); ok {
if v == nil {
return previous, false
}
return *v, true
}
}
m.mu.Lock()
read = m.loadReadOnly()
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
// The entry was previously expunged, which implies that there is a
// non-nil dirty map and this entry is not in it.
m.dirty[key] = e
}
if v := e.swapLocked(&value); v != nil {
loaded = true
previous = *v
}
} else if e, ok := m.dirty[key]; ok {
if v := e.swapLocked(&value); v != nil {
loaded = true
previous = *v
}
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(&readOnly[K, V]{m: read.m, amended: true})
}
m.dirty[key] = newEntry(value)
}
m.mu.Unlock()
return previous, loaded
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
// We need to be able to iterate over all of the keys that were already
// present at the start of the call to Range.
// If read.amended is false, then read.m satisfies that property without
// requiring us to hold m.mu for a long time.
read := m.loadReadOnly()
if read.amended {
// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
// (assuming the caller does not break out early), so a call to Range
// amortizes an entire copy of the map: we can promote the dirty copy
// immediately!
m.mu.Lock()
read = m.loadReadOnly()
if read.amended {
read = readOnly[K, V]{m: m.dirty}
copyRead := read
m.read.Store(&copyRead)
m.dirty = nil
m.misses = 0
}
m.mu.Unlock()
}
for k, e := range read.m {
v, ok := e.load()
if !ok {
continue
}
if !f(k, v) {
break
}
}
}
func (m *Map[K, V]) missLocked() {
m.misses++
if m.misses < len(m.dirty) {
return
}
m.read.Store(&readOnly[K, V]{m: m.dirty})
m.dirty = nil
m.misses = 0
}
func (m *Map[K, V]) dirtyLocked() {
if m.dirty != nil {
return
}
read := m.loadReadOnly()
m.dirty = make(map[K]*entry[V], len(read.m))
for k, e := range read.m {
if !e.tryExpungeLocked() {
m.dirty[k] = e
}
}
}
func (e *entry[V]) tryExpungeLocked() (isExpunged bool) {
p := atomic.LoadPointer(&e.p)
for p == nil {
if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
return true
}
p = atomic.LoadPointer(&e.p)
}
return p == expunged
}
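// Illustrative usage sketch (not part of the original source): a minimal use
// of cmap.Map. The import path follows the "common/cmap" layout referenced
// elsewhere in this tree and is otherwise an assumption.
package main

import (
	"fmt"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/cmap"
)

func main() {
	var m cmap.Map[string, int] // the zero Map is ready for use

	m.Store("clones", 1)
	if v, ok := m.Load("clones"); ok {
		fmt.Println(v) // 1
	}

	m.Range(func(k string, v int) bool {
		fmt.Println(k, v)
		return true // continue iterating
	})
}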
package config

import (
	"bufio"
	"log/slog"
	"os"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/scfg"
	"go.lindenii.runxiyu.org/forge/forged/internal/database"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/hooks"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/lmtp"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/ssh"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/web"
	"go.lindenii.runxiyu.org/forge/forged/internal/ipc/irc"
)
// Config is the root configuration for forged, decoded from the scfg
// configuration file.
type Config struct {
	DB    database.Config `scfg:"db"`
	Web   web.Config      `scfg:"web"`
	Hooks hooks.Config    `scfg:"hooks"`
	LMTP  lmtp.Config     `scfg:"lmtp"`
	SSH   ssh.Config      `scfg:"ssh"`
	IRC   irc.Config      `scfg:"irc"`
	Git   struct {
		RepoDir    string `scfg:"repo_dir"`
		Socket     string `scfg:"socket"`
		DaemonPath string `scfg:"daemon_path"`
	} `scfg:"git"`
	General struct {
		Title string `scfg:"title"`
	} `scfg:"general"`
	Pprof struct {
		Net  string `scfg:"net"`
		Addr string `scfg:"addr"`
	} `scfg:"pprof"`
}
func Open(path string) (config Config, err error) {
var configFile *os.File
if configFile, err = os.Open(path); err != nil {
return config, err
}
defer configFile.Close()
decoder := scfg.NewDecoder(bufio.NewReader(configFile))
if err = decoder.Decode(&config); err != nil {
return config, err
}
for _, u := range decoder.UnknownDirectives() {
slog.Warn("unknown configuration directive", "directive", u)
}
return config, err
}
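// Illustrative usage sketch (not part of the original source): loading a
// forge configuration and reading a couple of decoded fields. The file path
// is hypothetical.
package main

import (
	"fmt"
	"log"

	"go.lindenii.runxiyu.org/forge/forged/internal/config"
)

func main() {
	cfg, err := config.Open("/etc/forge/forge.scfg") // hypothetical path
	if err != nil {
		log.Fatalf("open config: %v", err)
	}
	fmt.Println(cfg.General.Title)
	fmt.Println(cfg.Git.RepoDir)
}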
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package git2c
import (
"fmt"
"net"
"go.lindenii.runxiyu.org/forge/forged/internal/bare"
)
// Client represents a connection to the git2d backend daemon.
type Client struct {
socketPath string
conn net.Conn
writer *bare.Writer
reader *bare.Reader
}
// NewClient establishes a connection to a git2d socket and returns a new Client.
func NewClient(socketPath string) (*Client, error) {
conn, err := net.Dial("unix", socketPath)
if err != nil {
return nil, fmt.Errorf("git2d connection failed: %w", err)
}
writer := bare.NewWriter(conn)
reader := bare.NewReader(conn)
return &Client{
socketPath: socketPath,
conn: conn,
writer: writer,
reader: reader,
}, nil
}
// Close terminates the underlying socket connection.
func (c *Client) Close() error {
if c.conn != nil {
return c.conn.Close()
}
return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package git2c
import (
"encoding/hex"
"errors"
"fmt"
"io"
)
// CmdIndex requests a repository index from git2d and returns the list of commits
// and the contents of a README file if available.
func (c *Client) CmdIndex(repoPath string) ([]Commit, *FilenameContents, error) {
if err := c.writer.WriteData([]byte(repoPath)); err != nil {
return nil, nil, fmt.Errorf("sending repo path failed: %w", err)
}
if err := c.writer.WriteUint(1); err != nil {
return nil, nil, fmt.Errorf("sending command failed: %w", err)
}
status, err := c.reader.ReadUint()
if err != nil {
return nil, nil, fmt.Errorf("reading status failed: %w", err)
}
if status != 0 {
return nil, nil, fmt.Errorf("git2d error: %d", status)
}
// README
readmeRaw, err := c.reader.ReadData()
if err != nil {
readmeRaw = nil
}
readmeFilename := "README.md" // TODO
readme := &FilenameContents{Filename: readmeFilename, Content: readmeRaw}
// Commits
var commits []Commit
for {
id, err := c.reader.ReadData()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
return nil, nil, fmt.Errorf("reading commit ID failed: %w", err)
}
title, _ := c.reader.ReadData()
authorName, _ := c.reader.ReadData()
authorEmail, _ := c.reader.ReadData()
authorDate, _ := c.reader.ReadData()
commits = append(commits, Commit{
Hash: hex.EncodeToString(id),
Author: string(authorName),
Email: string(authorEmail),
Date: string(authorDate),
Message: string(title),
})
}
return commits, readme, nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package git2c
import (
"errors"
"fmt"
"io"
)
// CmdTreeRaw queries git2d for a tree or blob object at the given path within the repository.
// It returns either a directory listing or the contents of a file.
func (c *Client) CmdTreeRaw(repoPath, pathSpec string) ([]TreeEntry, string, error) {
if err := c.writer.WriteData([]byte(repoPath)); err != nil {
return nil, "", fmt.Errorf("sending repo path failed: %w", err)
}
if err := c.writer.WriteUint(2); err != nil {
return nil, "", fmt.Errorf("sending command failed: %w", err)
}
if err := c.writer.WriteData([]byte(pathSpec)); err != nil {
return nil, "", fmt.Errorf("sending path failed: %w", err)
}
status, err := c.reader.ReadUint()
if err != nil {
return nil, "", fmt.Errorf("reading status failed: %w", err)
}
switch status {
case 0:
kind, err := c.reader.ReadUint()
if err != nil {
return nil, "", fmt.Errorf("reading object kind failed: %w", err)
}
switch kind {
case 1:
// Tree
count, err := c.reader.ReadUint()
if err != nil {
return nil, "", fmt.Errorf("reading entry count failed: %w", err)
}
var files []TreeEntry
for range count {
typeCode, err := c.reader.ReadUint()
if err != nil {
return nil, "", fmt.Errorf("error reading entry type: %w", err)
}
mode, err := c.reader.ReadUint()
if err != nil {
return nil, "", fmt.Errorf("error reading entry mode: %w", err)
}
size, err := c.reader.ReadUint()
if err != nil {
return nil, "", fmt.Errorf("error reading entry size: %w", err)
}
name, err := c.reader.ReadData()
if err != nil {
return nil, "", fmt.Errorf("error reading entry name: %w", err)
}
files = append(files, TreeEntry{
Name: string(name),
Mode: fmt.Sprintf("%06o", mode),
Size: size,
IsFile: typeCode == 2,
IsSubtree: typeCode == 1,
})
}
return files, "", nil
case 2:
// Blob
content, err := c.reader.ReadData()
if err != nil && !errors.Is(err, io.EOF) {
return nil, "", fmt.Errorf("error reading file content: %w", err)
}
return nil, string(content), nil
default:
return nil, "", fmt.Errorf("unknown kind: %d", kind)
}
case 3:
return nil, "", fmt.Errorf("path not found: %s", pathSpec)
default:
return nil, "", fmt.Errorf("unknown status code: %d", status)
}
}
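// Illustrative usage sketch (not part of the original source): asking git2d
// for a repository index. The import path, socket path, and repository path
// are assumptions; each Client wraps a single connection, so a fresh Client
// is used per request here.
package main

import (
	"fmt"
	"log"

	"go.lindenii.runxiyu.org/forge/forged/internal/git2c" // assumed location of this package
)

func main() {
	client, err := git2c.NewClient("/run/forge/git2d.sock") // hypothetical socket
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	commits, readme, err := client.CmdIndex("/srv/git/example.git") // hypothetical repo
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s, %d commits\n", readme.Filename, len(commits))
	for _, c := range commits {
		fmt.Println(c.Hash[:8], c.Message)
	}
}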
// Package git2c provides routines to interact with the git2d backend daemon.
package git2c
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package git2c
// Commit represents a single commit object retrieved from the git2d daemon.
type Commit struct {
Hash string
Author string
Email string
Date string
Message string
}
// FilenameContents holds the filename and byte contents of a file, such as a README.
type FilenameContents struct {
Filename string
Content []byte
}
// TreeEntry represents a file or directory entry within a Git tree object.
type TreeEntry struct {
Name string
Mode string
Size uint64
IsFile bool
IsSubtree bool
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
// TODO: Make the C part report detailed error messages too
package git2c
import "errors"
var (
Success error // nil; a zero status from git2d indicates success
ErrUnknown = errors.New("git2c: unknown error")
ErrPath = errors.New("git2c: get tree entry by path failed")
ErrRevparse = errors.New("git2c: revparse failed")
ErrReadme = errors.New("git2c: no readme")
ErrBlobExpected = errors.New("git2c: blob expected")
ErrEntryToObject = errors.New("git2c: tree entry to object conversion failed")
ErrBlobRawContent = errors.New("git2c: get blob raw content failed")
ErrRevwalk = errors.New("git2c: revwalk failed")
ErrRevwalkPushHead = errors.New("git2c: revwalk push head failed")
ErrBareProto = errors.New("git2c: bare protocol error")
)
func Perror(errno uint) error {
switch errno {
case 0:
return Success
case 3:
return ErrPath
case 4:
return ErrRevparse
case 5:
return ErrReadme
case 6:
return ErrBlobExpected
case 7:
return ErrEntryToObject
case 8:
return ErrBlobRawContent
case 9:
return ErrRevwalk
case 10:
return ErrRevwalkPushHead
case 11:
return ErrBareProto
}
return ErrUnknown
}
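// Illustrative usage sketch (not part of the original source): mapping a
// git2d status code to one of the sentinel errors above and testing it with
// errors.Is. The import path is an assumption.
package main

import (
	"errors"
	"fmt"

	"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
)

func main() {
	err := git2c.Perror(3)
	fmt.Println(errors.Is(err, git2c.ErrPath)) // true
	fmt.Println(git2c.Perror(0) == nil)        // true: Success is a nil error
}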
package hooks

import (
	"fmt"
	"net"

	"github.com/gliderlabs/ssh"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/cmap"
	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)
// Server handles hook callbacks arriving over a Unix domain socket.
type Server struct {
hookMap cmap.Map[string, hookInfo]
socketPath string
executablesPath string
}
type Config struct {
Socket string `scfg:"socket"`
Execs string `scfg:"execs"`
}
type hookInfo struct {
session ssh.Session
pubkey string
directAccess bool
repoPath string
userID int
userType string
repoID int
groupPath []string
repoName string
contribReq string
}
// New creates a hook server from the provided configuration.
func New(config Config) (pool *Server) {
return &Server{
socketPath: config.Socket,
executablesPath: config.Execs,
}
}
// Run listens on the hook socket and dispatches each accepted connection to a
// handler goroutine. It blocks until accepting fails.
func (pool *Server) Run() error {
listener, _, err := misc.ListenUnixSocket(pool.socketPath)
if err != nil {
return fmt.Errorf("listen unix socket for hooks: %w", err)
}
for {
conn, err := listener.Accept()
if err != nil {
return fmt.Errorf("accept conn: %w", err)
}
go pool.handleConn(conn)
}
}
func (pool *Server) handleConn(conn net.Conn) {
panic("TODO: handle hook connection")
}
// SPDX-FileCopyrightText: Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
// Package humanize provides functions to convert numbers into human-readable formats.
package humanize
import (
"fmt"
"math"
)
// IBytes produces a human readable representation of an IEC size.
func IBytes(s uint64) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
return humanateBytes(s, 1024, sizes)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%d B", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
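// Illustrative usage sketch (not part of the original source): IBytes output
// for a few sizes, using 1024-based IEC units. The import path is an
// assumption.
package main

import (
	"fmt"

	"go.lindenii.runxiyu.org/forge/forged/internal/humanize"
)

func main() {
	fmt.Println(humanize.IBytes(5))       // 5 B
	fmt.Println(humanize.IBytes(2048))    // 2.0 KiB
	fmt.Println(humanize.IBytes(3 << 30)) // 3.0 GiB
}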
package ssh
type Server struct{}
type Config struct {
Net string `scfg:"net"`
Addr string `scfg:"addr"`
Key string `scfg:"key"`
Root string `scfg:"root"`
}
package web
import "net/http"
type Server struct {
httpServer *http.Server
}
type Config struct {
Net string `scfg:"net"`
Addr string `scfg:"addr"`
CookieExpiry int `scfg:"cookie_expiry"`
Root string `scfg:"root"`
ReadTimeout uint32 `scfg:"read_timeout"`
WriteTimeout uint32 `scfg:"write_timeout"`
IdleTimeout uint32 `scfg:"idle_timeout"`
ReverseProxy bool `scfg:"reverse_proxy"`
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package irc

import (
	"crypto/tls"
	"log/slog"
	"net"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)
// Config contains IRC connection and identity settings for the bot.
// This should usually be a part of the primary config struct.
type Config struct {
Net string `scfg:"net"`
Addr string `scfg:"addr"`
TLS bool `scfg:"tls"`
SendQ uint `scfg:"sendq"`
Nick string `scfg:"nick"`
User string `scfg:"user"`
Gecos string `scfg:"gecos"`
}
// Bot represents an IRC bot client that handles events and allows for sending messages.
type Bot struct {
config *Config
ircSendBuffered chan string
ircSendDirectChan chan misc.ErrorBack[string]
}
// NewBot creates a new Bot instance using the provided configuration.
func NewBot(c *Config) (b *Bot) {
b = &Bot{
config: c,
}
return
}
// Connect establishes a new IRC session and starts handling incoming and outgoing messages.
// This method blocks until an error occurs or the connection is closed.
func (b *Bot) Connect() error {
var err error
var underlyingConn net.Conn
if b.config.TLS {
underlyingConn, err = tls.Dial(b.config.Net, b.config.Addr, nil)
} else {
underlyingConn, err = net.Dial(b.config.Net, b.config.Addr)
}
if err != nil {
return err
}
defer underlyingConn.Close()
conn := NewConn(underlyingConn)
logAndWriteLn := func(s string) (n int, err error) {
slog.Debug("irc tx", "line", s)
return conn.WriteString(s + "\r\n")
}
_, err = logAndWriteLn("NICK " + b.config.Nick)
if err != nil {
return err
}
_, err = logAndWriteLn("USER " + b.config.User + " 0 * :" + b.config.Gecos)
if err != nil {
return err
}
readLoopError := make(chan error)
writeLoopAbort := make(chan struct{})
go func() {
for {
select {
case <-writeLoopAbort:
return
default:
}
msg, line, err := conn.ReadMessage()
if err != nil {
readLoopError <- err
return
}
slog.Debug("irc rx", "line", line)
switch msg.Command {
case "001":
_, err = logAndWriteLn("JOIN #chat")
if err != nil {
readLoopError <- err
return
}
case "PING":
_, err = logAndWriteLn("PONG :" + msg.Args[0])
if err != nil {
readLoopError <- err
return
}
case "JOIN":
c, ok := msg.Source.(Client)
if !ok {
slog.Error("unable to convert source of JOIN to client")
}
if c.Nick != b.config.Nick {
continue
}
default:
}
}
}()
for {
select {
case err = <-readLoopError:
return err
case line := <-b.ircSendBuffered:
_, err = logAndWriteLn(line)
if err != nil {
select {
case b.ircSendBuffered <- line:
default:
slog.Error("unable to requeue message", "line", line)
}
writeLoopAbort <- struct{}{}
return err
}
case lineErrorBack := <-b.ircSendDirectChan:
_, err = logAndWriteLn(lineErrorBack.Content)
lineErrorBack.ErrorChan <- err
if err != nil {
writeLoopAbort <- struct{}{}
return err
}
}
}
}
// SendDirect sends an IRC message directly to the connection and bypasses
// the buffering system.
func (b *Bot) SendDirect(line string) error {
ech := make(chan error, 1)
b.ircSendDirectChan <- misc.ErrorBack[string]{
Content: line,
ErrorChan: ech,
}
return <-ech
}
// Send queues a message to be sent asynchronously via the buffered send queue.
// If the queue is full, the message is dropped and an error is logged.
func (b *Bot) Send(line string) {
select {
case b.ircSendBuffered <- line:
default:
slog.Error("irc sendq full", "line", line)
}
}
// ConnectLoop continuously attempts to maintain an IRC session.
// If the connection drops, it automatically retries with no delay.
func (b *Bot) ConnectLoop() {
b.ircSendBuffered = make(chan string, b.config.SendQ)
b.ircSendDirectChan = make(chan misc.ErrorBack[string])
for {
err := b.Connect()
slog.Error("irc session error", "error", err)
}
}
package irc

import (
	"bufio"
	"net"
	"slices"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)
type Conn struct {
netConn net.Conn
bufReader *bufio.Reader
}
func NewConn(netConn net.Conn) Conn {
return Conn{
netConn: netConn,
bufReader: bufio.NewReader(netConn),
}
}
func (c *Conn) ReadMessage() (msg Message, line string, err error) {
raw, err := c.bufReader.ReadSlice('\n')
if err != nil {
return
}
if len(raw) > 0 && raw[len(raw)-1] == '\n' {
raw = raw[:len(raw)-1]
}
if len(raw) > 0 && raw[len(raw)-1] == '\r' {
raw = raw[:len(raw)-1]
}
lineBytes := slices.Clone(raw)
line = misc.BytesToString(lineBytes)
msg, err = Parse(lineBytes)
return
}
func (c *Conn) Write(p []byte) (n int, err error) {
return c.netConn.Write(p)
}
func (c *Conn) WriteString(s string) (n int, err error) {
return c.netConn.Write(misc.StringToBytes(s))
}
// Package irc provides basic IRC bot functionality.
package irc
package irc
import "errors"
var (
ErrInvalidIRCv3Tag = errors.New("invalid ircv3 tag")
ErrMalformedMsg = errors.New("malformed irc message")
)
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2018-2024 luk3yx <https://luk3yx.github.io>
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package irc

import (
	"bytes"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)
type Message struct {
Command string
Source Source
Tags map[string]string
Args []string
}
// Parse parses a single raw IRC protocol line into a Message.
// All strings returned are borrowed from the input byte slice.
func Parse(raw []byte) (msg Message, err error) {
sp := bytes.Split(raw, []byte{' '}) // TODO: Use bytes.Cut instead here
if bytes.HasPrefix(sp[0], []byte{'@'}) { // TODO: Check size manually
if len(sp[0]) < 2 {
err = ErrMalformedMsg
return
}
sp[0] = sp[0][1:]
msg.Tags, err = tagsToMap(sp[0])
if err != nil {
return
}
if len(sp) < 2 {
err = ErrMalformedMsg
return
}
sp = sp[1:]
} else {
msg.Tags = nil // TODO: Is a nil map the correct thing to use here?
}
if bytes.HasPrefix(sp[0], []byte{':'}) { // TODO: Check size manually
if len(sp[0]) < 2 {
err = ErrMalformedMsg
return
}
sp[0] = sp[0][1:]
msg.Source = parseSource(sp[0])
if len(sp) < 2 {
err = ErrMalformedMsg
return
}
sp = sp[1:]
}
msg.Command = misc.BytesToString(sp[0])
if len(sp) < 2 {
return
}
sp = sp[1:]
for i := 0; i < len(sp); i++ {
if len(sp[i]) == 0 {
continue
}
if sp[i][0] == ':' {
if len(sp[i]) < 2 {
sp[i] = []byte{}
} else {
sp[i] = sp[i][1:]
}
msg.Args = append(msg.Args, misc.BytesToString(bytes.Join(sp[i:], []byte{' '})))
// TODO: Avoid Join by not using sp in the first place
break
}
msg.Args = append(msg.Args, misc.BytesToString(sp[i]))
}
return
}
var ircv3TagEscapes = map[byte]byte{ //nolint:gochecknoglobals
':': ';',
's': ' ',
'r': '\r',
'n': '\n',
}
func tagsToMap(raw []byte) (tags map[string]string, err error) {
tags = make(map[string]string)
for rawTag := range bytes.SplitSeq(raw, []byte{';'}) {
key, value, found := bytes.Cut(rawTag, []byte{'='})
if !found {
err = ErrInvalidIRCv3Tag
return
}
if len(value) == 0 {
tags[misc.BytesToString(key)] = ""
} else {
if !bytes.Contains(value, []byte{'\\'}) {
tags[misc.BytesToString(key)] = misc.BytesToString(value)
} else {
valueUnescaped := bytes.NewBuffer(make([]byte, 0, len(value)))
for i := 0; i < len(value); i++ {
if value[i] == '\\' {
i++
if i >= len(value) {
break // lone trailing backslash: drop it, per the IRCv3 tag escaping rules
}
byteUnescaped, ok := ircv3TagEscapes[value[i]]
if !ok {
byteUnescaped = value[i]
}
valueUnescaped.WriteByte(byteUnescaped)
} else {
valueUnescaped.WriteByte(value[i])
}
}
tags[misc.BytesToString(key)] = misc.BytesToString(valueUnescaped.Bytes())
}
}
}
return
}
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

package irc

import (
	"bytes"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)
type Source interface {
AsSourceString() string
}
func parseSource(s []byte) Source {
nick, userhost, found := bytes.Cut(s, []byte{'!'})
if !found {
return Server{name: misc.BytesToString(s)}
}
user, host, found := bytes.Cut(userhost, []byte{'@'})
if !found {
return Server{name: misc.BytesToString(s)}
}
return Client{
Nick: misc.BytesToString(nick),
User: misc.BytesToString(user),
Host: misc.BytesToString(host),
}
}
type Server struct {
name string
}
func (s Server) AsSourceString() string {
return s.name
}
type Client struct {
Nick string
User string
Host string
}
func (c Client) AsSourceString() string {
return c.Nick + "!" + c.User + "@" + c.Host
}
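// Illustrative usage sketch (not part of the original source): parsing a raw
// IRC line that carries a tag, a source prefix, and a trailing parameter. The
// import path follows the "ipc/irc" layout referenced above.
package main

import (
	"fmt"

	"go.lindenii.runxiyu.org/forge/forged/internal/ipc/irc"
)

func main() {
	raw := []byte("@time=2025-01-01T00:00:00Z :nick!user@host PRIVMSG #chat :hello world")
	msg, err := irc.Parse(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.Command)      // PRIVMSG
	fmt.Println(msg.Tags["time"]) // 2025-01-01T00:00:00Z
	fmt.Println(msg.Args)         // [#chat hello world]
	if c, ok := msg.Source.(irc.Client); ok {
		fmt.Println(c.AsSourceString()) // nick!user@host
	}
}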
package lmtp

import (
	"fmt"
	"net"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)
// Server accepts LMTP deliveries over a Unix domain socket.
type Server struct {
socket string
domain string
maxSize int64
writeTimeout uint32
readTimeout uint32
}
type Config struct {
Socket string `scfg:"socket"`
Domain string `scfg:"domain"`
MaxSize int64 `scfg:"max_size"`
WriteTimeout uint32 `scfg:"write_timeout"`
ReadTimeout uint32 `scfg:"read_timeout"`
}
// New creates an LMTP server from the provided configuration.
func New(config Config) (pool *Server) {
return &Server{
socket: config.Socket,
domain: config.Domain,
maxSize: config.MaxSize,
writeTimeout: config.WriteTimeout,
readTimeout: config.ReadTimeout,
}
}
// Run listens on the LMTP socket and dispatches each accepted connection to a
// handler goroutine. It blocks until accepting fails.
func (pool *Server) Run() error {
listener, _, err := misc.ListenUnixSocket(pool.socket)
if err != nil {
return fmt.Errorf("listen unix socket for LMTP: %w", err)
}
for {
conn, err := listener.Accept()
if err != nil {
return fmt.Errorf("accept conn: %w", err)
}
go pool.handleConn(conn)
}
}
func (pool *Server) handleConn(conn net.Conn) {
panic("TODO: handle LMTP connection")
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
// ErrorBack wraps a value and a channel for communicating an associated error.
// Typically used to get an error response after sending data across a channel.
type ErrorBack[T any] struct {
Content T
ErrorChan chan error
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
import (
"io"
"io/fs"
"os"
)
// DeployBinary copies the contents of a binary file to the target destination path.
// The destination file is created with executable permissions.
func DeployBinary(src fs.File, dst string) (err error) {
var dstFile *os.File
if dstFile, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755); err != nil {
return err
}
defer dstFile.Close()
_, err = io.Copy(dstFile, src)
return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
import "iter"
// IterSeqLimit returns an iterator equivalent to the supplied one, but stops
// after at most n values have been yielded.
func IterSeqLimit[T any](s iter.Seq[T], n uint) iter.Seq[T] {
return func(yield func(T) bool) {
var iterations uint
for v := range s {
if iterations >= n { // also handles n == 0 without the uint underflow of n-1
return
}
if !yield(v) {
return
}
iterations++
}
}
}
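// Illustrative usage sketch (not part of the original source): limiting an
// iterator to its first n values, using slices.Values from the standard
// library to build the input sequence.
package main

import (
	"fmt"
	"slices"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)

func main() {
	seq := slices.Values([]string{"a", "b", "c", "d"})
	for v := range misc.IterSeqLimit(seq, 2) {
		fmt.Println(v) // prints "a" then "b"
	}
}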
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package misc provides miscellaneous functions and other definitions.
package misc
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
// FirstOrPanic returns the value or panics if the error is non-nil.
func FirstOrPanic[T any](v T, err error) T {
if err != nil {
panic(err)
}
return v
}
// NoneOrPanic panics if the provided error is non-nil.
func NoneOrPanic(err error) {
if err != nil {
panic(err)
}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
import "strings"
// SliceContainsNewlines returns true if and only if the given slice contains
// one or more strings that contain newlines.
func SliceContainsNewlines(s []string) bool {
for _, v := range s {
if strings.Contains(v, "\n") {
return true
}
}
return false
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
import (
"net/url"
"strings"
)
// These are all trivial functions that are intended to be used in HTML
// templates.
// FirstLine returns the first line of a string.
func FirstLine(s string) string {
before, _, _ := strings.Cut(s, "\n")
return before
}
// PathEscape escapes the input as a URL path segment.
func PathEscape(s string) string {
return url.PathEscape(s)
}
// QueryEscape escapes the input as a URL query segment.
func QueryEscape(s string) string {
return url.QueryEscape(s)
}
// Dereference dereferences a pointer.
func Dereference[T any](p *T) T {
return *p
}
// DereferenceOrZero dereferences a pointer. If the pointer is nil, the zero
// value of its associated type is returned instead.
func DereferenceOrZero[T any](p *T) T {
if p != nil {
return *p
}
var z T
return z
}
// Minus subtracts two numbers.
func Minus(a, b int) int {
return a - b
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
import "unsafe"
// StringToBytes converts a string to a byte slice without copying the string.
// Memory is borrowed from the string.
// The resulting byte slice must not be modified in any form.
func StringToBytes(s string) (bytes []byte) {
return unsafe.Slice(unsafe.StringData(s), len(s))
}
// BytesToString converts a byte slice to a string without copying the bytes.
// Memory is borrowed from the byte slice.
// The source byte slice must not be modified.
func BytesToString(b []byte) string {
return unsafe.String(unsafe.SliceData(b), len(b))
}
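// Illustrative usage sketch (not part of the original source): zero-copy
// string/byte conversions. The results borrow the original memory, so they
// must be treated as read-only, exactly as the doc comments above require.
package main

import (
	"fmt"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)

func main() {
	b := misc.StringToBytes("hello") // must not be modified
	fmt.Println(len(b), string(b[:2]))

	buf := []byte("world")
	s := misc.BytesToString(buf) // buf must not be modified afterwards
	fmt.Println(s)
}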
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package misc
import (
"net/http"
"net/url"
"strings"
)
// ParseReqURI parses an HTTP request URL, and returns a slice of path segments
// and the query parameters. It handles %2F correctly.
func ParseReqURI(requestURI string) (segments []string, params url.Values, err error) {
path, paramsStr, _ := strings.Cut(requestURI, "?")
segments, err = PathToSegments(path)
if err != nil {
return
}
params, err = url.ParseQuery(paramsStr)
return
}
// PathToSegments splits a path into unescaped segments. It handles %2F correctly.
func PathToSegments(path string) (segments []string, err error) {
segments = strings.Split(strings.TrimPrefix(path, "/"), "/")
for i, segment := range segments {
segments[i], err = url.PathUnescape(segment)
if err != nil {
return
}
}
return
}
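// Illustrative usage sketch (not part of the original source): splitting a
// request URI into unescaped path segments and query parameters; an escaped
// %2F survives as a literal slash inside a single segment.
package main

import (
	"fmt"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)

func main() {
	segments, params, err := misc.ParseReqURI("/group/repo%2Fname/tree/?ref=main")
	if err != nil {
		panic(err)
	}
	fmt.Println(segments)          // [group repo/name tree ]
	fmt.Println(params.Get("ref")) // main
}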
// RedirectDir returns true and redirects the user to a version of the URL with
// a trailing slash, if and only if the request URL does not already have a
// trailing slash.
func RedirectDir(writer http.ResponseWriter, request *http.Request) bool {
requestURI := request.RequestURI
pathEnd := strings.IndexAny(requestURI, "?#")
var path, rest string
if pathEnd == -1 {
path = requestURI
} else {
path = requestURI[:pathEnd]
rest = requestURI[pathEnd:]
}
if !strings.HasSuffix(path, "/") {
http.Redirect(writer, request, path+"/"+rest, http.StatusSeeOther)
return true
}
return false
}
// RedirectNoDir returns true and redirects the user to a version of the URL
// without a trailing slash, if and only if the request URL has a trailing
// slash.
func RedirectNoDir(writer http.ResponseWriter, request *http.Request) bool {
requestURI := request.RequestURI
pathEnd := strings.IndexAny(requestURI, "?#")
var path, rest string
if pathEnd == -1 {
path = requestURI
} else {
path = requestURI[:pathEnd]
rest = requestURI[pathEnd:]
}
if strings.HasSuffix(path, "/") {
http.Redirect(writer, request, strings.TrimSuffix(path, "/")+rest, http.StatusSeeOther)
return true
}
return false
}
// RedirectUnconditionally unconditionally redirects the user back to the
// current page while preserving query parameters.
func RedirectUnconditionally(writer http.ResponseWriter, request *http.Request) {
requestURI := request.RequestURI
pathEnd := strings.IndexAny(requestURI, "?#")
var path, rest string
if pathEnd == -1 {
path = requestURI
} else {
path = requestURI[:pathEnd]
rest = requestURI[pathEnd:]
}
http.Redirect(writer, request, path+rest, http.StatusSeeOther)
}
// SegmentsToURL joins URL segments to the path component of a URL.
// Each segment is escaped properly first.
func SegmentsToURL(segments []string) string {
for i, segment := range segments {
segments[i] = url.PathEscape(segment)
}
return strings.Join(segments, "/")
}
// AnyContain returns true if and only if ss contains a string that contains c.
func AnyContain(ss []string, c string) bool {
for _, s := range ss {
if strings.Contains(s, c) {
return true
}
}
return false
}
package misc
import (
"errors"
"fmt"
"net"
"syscall"
)
// ListenUnixSocket listens on a Unix domain socket at path. If the address is
// already in use, the stale socket file is removed and the listen is retried;
// replaced reports whether that happened.
func ListenUnixSocket(path string) (listener net.Listener, replaced bool, err error) {
listener, err = net.Listen("unix", path)
if errors.Is(err, syscall.EADDRINUSE) {
replaced = true
if unlinkErr := syscall.Unlink(path); unlinkErr != nil {
return listener, false, fmt.Errorf("remove existing socket %q: %w", path, unlinkErr)
}
listener, err = net.Listen("unix", path)
}
if err != nil {
return listener, replaced, fmt.Errorf("listen on unix socket %q: %w", path, err)
}
return listener, replaced, nil
}
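// Illustrative usage sketch (not part of the original source): listening on a
// Unix domain socket the same way the hook and LMTP servers above do,
// replacing a stale socket file if one was left behind. The path is
// hypothetical.
package main

import (
	"log"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/misc"
)

func main() {
	listener, replaced, err := misc.ListenUnixSocket("/tmp/example.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
	if replaced {
		log.Println("replaced a stale socket file")
	}
	// listener.Accept() would follow here, as in the servers above.
}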
linters:
enable-all: true
disable:
- perfsprint
- wsl
- varnamelen
- nlreturn
- exhaustruct
- wrapcheck
- lll
- exhaustive
- intrange
- godox
- nestif
- err113
- staticcheck
- errorlint
- cyclop
- nonamedreturns
- funlen
- gochecknoglobals
- tenv
issues:
max-issues-per-linter: 0
max-same-issues: 0
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
package scfg
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
// This limits the max block nesting depth to prevent stack overflows.
const maxNestingDepth = 1000
// Load loads a configuration file.
func Load(path string) (Block, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return Read(f)
}
// Read parses a configuration file from an io.Reader.
func Read(r io.Reader) (Block, error) {
scanner := bufio.NewScanner(r)
dec := decoder{scanner: scanner}
block, closingBrace, err := dec.readBlock()
if err != nil {
return nil, err
} else if closingBrace {
return nil, fmt.Errorf("line %v: unexpected '}'", dec.lineno)
}
return block, scanner.Err()
}
type decoder struct {
scanner *bufio.Scanner
lineno int
blockDepth int
}
// readBlock reads a block. closingBrace is true if parsing stopped on '}'
// (otherwise, it stopped on Scanner.Scan).
func (dec *decoder) readBlock() (block Block, closingBrace bool, err error) {
dec.blockDepth++
defer func() {
dec.blockDepth--
}()
if dec.blockDepth >= maxNestingDepth {
return nil, false, fmt.Errorf("exceeded max block depth")
}
for dec.scanner.Scan() {
dec.lineno++
l := dec.scanner.Text()
words, err := splitWords(l)
if err != nil {
return nil, false, fmt.Errorf("line %v: %v", dec.lineno, err)
} else if len(words) == 0 {
continue
}
if len(words) == 1 && l[len(l)-1] == '}' {
closingBrace = true
break
}
var d *Directive
if words[len(words)-1] == "{" && l[len(l)-1] == '{' {
words = words[:len(words)-1]
var name string
params := words
if len(words) > 0 {
name, params = words[0], words[1:]
}
startLineno := dec.lineno
childBlock, childClosingBrace, err := dec.readBlock()
if err != nil {
return nil, false, err
} else if !childClosingBrace {
return nil, false, fmt.Errorf("line %v: unterminated block", startLineno)
}
// Allows callers to tell apart "no block" and "empty block"
if childBlock == nil {
childBlock = Block{}
}
d = &Directive{Name: name, Params: params, Children: childBlock, lineno: dec.lineno}
} else {
d = &Directive{Name: words[0], Params: words[1:], lineno: dec.lineno}
}
block = append(block, d)
}
return block, closingBrace, nil
}
func splitWords(l string) ([]string, error) {
var (
words []string
sb strings.Builder
escape bool
quote rune
wantWSP bool
)
for _, ch := range l {
switch {
case escape:
sb.WriteRune(ch)
escape = false
case wantWSP && (ch != ' ' && ch != '\t'):
return words, fmt.Errorf("atom not allowed after quoted string")
case ch == '\\':
escape = true
case quote != 0 && ch == quote:
quote = 0
wantWSP = true
if sb.Len() == 0 {
words = append(words, "")
}
case quote == 0 && len(words) == 0 && sb.Len() == 0 && ch == '#':
return nil, nil
case quote == 0 && (ch == '\'' || ch == '"'):
if sb.Len() > 0 {
return words, fmt.Errorf("quoted string not allowed after atom")
}
quote = ch
case quote == 0 && (ch == ' ' || ch == '\t'):
if sb.Len() > 0 {
words = append(words, sb.String())
}
sb.Reset()
wantWSP = false
default:
sb.WriteRune(ch)
}
}
if quote != 0 {
return words, fmt.Errorf("unterminated quoted string")
}
if sb.Len() > 0 {
words = append(words, sb.String())
}
return words, nil
}
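// Illustrative usage sketch (not part of the original source): parsing a tiny
// scfg document from a string and walking the resulting Block with Get.
package main

import (
	"fmt"
	"strings"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/scfg"
)

func main() {
	doc := "web {\n\taddr :8080\n}\n"
	block, err := scfg.Read(strings.NewReader(doc))
	if err != nil {
		panic(err)
	}
	web := block.Get("web")
	fmt.Println(web.Children.Get("addr").Params) // [:8080]
}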
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
// Package scfg parses and formats configuration files.
// Note that this fork of scfg behaves differently from upstream scfg.
package scfg
import (
"fmt"
)
// Block is a list of directives.
type Block []*Directive
// GetAll returns a list of directives with the provided name.
func (blk Block) GetAll(name string) []*Directive {
l := make([]*Directive, 0, len(blk))
for _, child := range blk {
if child.Name == name {
l = append(l, child)
}
}
return l
}
// Get returns the first directive with the provided name.
func (blk Block) Get(name string) *Directive {
for _, child := range blk {
if child.Name == name {
return child
}
}
return nil
}
// Directive is a configuration directive.
type Directive struct {
Name string
Params []string
Children Block
lineno int
}
// ParseParams extracts parameters from the directive. It errors out if the
// user hasn't provided enough parameters.
func (d *Directive) ParseParams(params ...*string) error {
if len(d.Params) < len(params) {
return fmt.Errorf("directive %q: want %v params, got %v", d.Name, len(params), len(d.Params))
}
for i, ptr := range params {
if ptr == nil {
continue
}
*ptr = d.Params[i]
}
return nil
}
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
package scfg
import (
"fmt"
"reflect"
"strings"
"sync"
)
// structInfo contains scfg metadata for structs.
type structInfo struct {
param int // index of field storing parameters
children map[string]int // indices of fields storing child directives
}
var (
structCacheMutex sync.Mutex
structCache = make(map[reflect.Type]*structInfo)
)
func getStructInfo(t reflect.Type) (*structInfo, error) {
structCacheMutex.Lock()
defer structCacheMutex.Unlock()
if info := structCache[t]; info != nil {
return info, nil
}
info := &structInfo{
param: -1,
children: make(map[string]int),
}
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Anonymous {
return nil, fmt.Errorf("scfg: anonymous struct fields are not supported")
} else if !f.IsExported() {
continue
}
tag := f.Tag.Get("scfg")
parts := strings.Split(tag, ",")
k, options := parts[0], parts[1:]
if k == "-" {
continue
} else if k == "" {
k = f.Name
}
isParam := false
for _, opt := range options {
switch opt {
case "param":
isParam = true
default:
return nil, fmt.Errorf("scfg: invalid option %q in struct tag", opt)
}
}
if isParam {
if info.param >= 0 {
return nil, fmt.Errorf("scfg: param option specified multiple times in struct tag in %v", t)
}
if parts[0] != "" {
return nil, fmt.Errorf("scfg: name must be empty when param option is specified in struct tag in %v", t)
}
info.param = i
} else {
if _, ok := info.children[k]; ok {
return nil, fmt.Errorf("scfg: key %q specified multiple times in struct tag in %v", k, t)
}
info.children[k] = i
}
}
structCache[t] = info
return info, nil
}
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package scfg
import (
"encoding"
"fmt"
"io"
"reflect"
"strconv"
)
// Decoder reads and decodes an scfg document from an input stream.
type Decoder struct {
r io.Reader
unknownDirectives []*Directive
}
// NewDecoder returns a new decoder which reads from r.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UnknownDirectives returns a slice of all unknown directives encountered
// during Decode.
func (dec *Decoder) UnknownDirectives() []*Directive {
return dec.unknownDirectives
}
// Decode reads scfg document from the input and stores it in the value pointed
// to by v.
//
// If v is nil or not a pointer, Decode returns an error.
//
// Blocks can be unmarshaled to:
//
// - Maps. Each directive is unmarshaled into a map entry. The map key must
// be a string.
// - Structs. Each directive is unmarshaled into a struct field.
//
// Duplicate directives are not allowed, unless the struct field or map value
// is a slice of values representing a directive: structs or maps.
//
// Directives can be unmarshaled to:
//
// - Maps. The children block is unmarshaled into the map. Parameters are not
// allowed.
// - Structs. The children block is unmarshaled into the struct. Parameters
// are allowed if one of the struct fields contains the "param" option in
// its tag.
// - Slices. Parameters are unmarshaled into the slice. Children blocks are
// not allowed.
// - Arrays. Parameters are unmarshaled into the array. The number of
// parameters must match exactly the length of the array. Children blocks
// are not allowed.
// - Strings, booleans, integers, floating-point values, values implementing
// encoding.TextUnmarshaler. Only a single parameter is allowed and is
// unmarshaled into the value. Children blocks are not allowed.
//
// The decoding of each struct field can be customized by the format string
// stored under the "scfg" key in the struct field's tag. The tag contains the
// name of the field possibly followed by a comma-separated list of options.
// The name may be empty in order to specify options without overriding the
// default field name. As a special case, if the field name is "-", the field
// is ignored. The "param" option specifies that directive parameters are
// stored in this field (the name must be empty).
func (dec *Decoder) Decode(v interface{}) error {
block, err := Read(dec.r)
if err != nil {
return err
}
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return fmt.Errorf("scfg: invalid value for unmarshaling")
}
return dec.unmarshalBlock(block, rv)
}
func (dec *Decoder) unmarshalBlock(block Block, v reflect.Value) error {
v = unwrapPointers(v)
t := v.Type()
dirsByName := make(map[string][]*Directive, len(block))
for _, dir := range block {
dirsByName[dir.Name] = append(dirsByName[dir.Name], dir)
}
switch v.Kind() {
case reflect.Map:
if t.Key().Kind() != reflect.String {
return fmt.Errorf("scfg: map key type must be string")
}
if v.IsNil() {
v.Set(reflect.MakeMap(t))
} else if v.Len() > 0 {
clearMap(v)
}
for name, dirs := range dirsByName {
mv := reflect.New(t.Elem()).Elem()
if err := dec.unmarshalDirectiveList(dirs, mv); err != nil {
return err
}
v.SetMapIndex(reflect.ValueOf(name), mv)
}
case reflect.Struct:
si, err := getStructInfo(t)
if err != nil {
return err
}
seen := make(map[int]bool)
for name, dirs := range dirsByName {
fieldIndex, ok := si.children[name]
if !ok {
dec.unknownDirectives = append(dec.unknownDirectives, dirs...)
continue
}
fv := v.Field(fieldIndex)
if err := dec.unmarshalDirectiveList(dirs, fv); err != nil {
return err
}
seen[fieldIndex] = true
}
for name, fieldIndex := range si.children {
if fieldIndex == si.param {
continue
}
if _, ok := seen[fieldIndex]; !ok {
return fmt.Errorf("scfg: missing required directive %q", name)
}
}
default:
return fmt.Errorf("scfg: unsupported type for unmarshaling blocks: %v", t)
}
return nil
}
func (dec *Decoder) unmarshalDirectiveList(dirs []*Directive, v reflect.Value) error {
v = unwrapPointers(v)
t := v.Type()
if v.Kind() != reflect.Slice || !isDirectiveType(t.Elem()) {
if len(dirs) > 1 {
return newUnmarshalDirectiveError(dirs[1], "directive must not be specified more than once")
}
return dec.unmarshalDirective(dirs[0], v)
}
sv := reflect.MakeSlice(t, len(dirs), len(dirs))
for i, dir := range dirs {
if err := dec.unmarshalDirective(dir, sv.Index(i)); err != nil {
return err
}
}
v.Set(sv)
return nil
}
// isDirectiveType checks whether a type can only be unmarshaled as a
// directive, not as a parameter. Accepting too many types here would result in
// ambiguities, see:
// https://lists.sr.ht/~emersion/public-inbox/%3C20230629132458.152205-1-contact%40emersion.fr%3E#%3Ch4Y2peS_YBqY3ar4XlmPDPiNBFpYGns3EBYUx3_6zWEhV2o8_-fBQveRujGADWYhVVCucHBEryFGoPtpC3d3mQ-x10pWnFogfprbQTSvtxc=@emersion.fr%3E
func isDirectiveType(t reflect.Type) bool {
for t.Kind() == reflect.Ptr {
t = t.Elem()
}
textUnmarshalerType := reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
if reflect.PointerTo(t).Implements(textUnmarshalerType) {
return false
}
switch t.Kind() {
case reflect.Struct, reflect.Map:
return true
default:
return false
}
}
func (dec *Decoder) unmarshalDirective(dir *Directive, v reflect.Value) error {
v = unwrapPointers(v)
t := v.Type()
if v.CanAddr() {
if _, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok {
if len(dir.Children) != 0 {
return newUnmarshalDirectiveError(dir, "directive requires zero children")
}
return unmarshalParamList(dir, v)
}
}
switch v.Kind() {
case reflect.Map:
if len(dir.Params) > 0 {
return newUnmarshalDirectiveError(dir, "directive requires zero parameters")
}
if err := dec.unmarshalBlock(dir.Children, v); err != nil {
return err
}
case reflect.Struct:
si, err := getStructInfo(t)
if err != nil {
return err
}
if si.param >= 0 {
if err := unmarshalParamList(dir, v.Field(si.param)); err != nil {
return err
}
} else {
if len(dir.Params) > 0 {
return newUnmarshalDirectiveError(dir, "directive requires zero parameters")
}
}
if err := dec.unmarshalBlock(dir.Children, v); err != nil {
return err
}
default:
if len(dir.Children) != 0 {
return newUnmarshalDirectiveError(dir, "directive requires zero children")
}
if err := unmarshalParamList(dir, v); err != nil {
return err
}
}
return nil
}
func unmarshalParamList(dir *Directive, v reflect.Value) error {
switch v.Kind() {
case reflect.Slice:
t := v.Type()
sv := reflect.MakeSlice(t, len(dir.Params), len(dir.Params))
for i, param := range dir.Params {
if err := unmarshalParam(param, sv.Index(i)); err != nil {
return newUnmarshalParamError(dir, i, err)
}
}
v.Set(sv)
case reflect.Array:
if len(dir.Params) != v.Len() {
return newUnmarshalDirectiveError(dir, fmt.Sprintf("directive requires exactly %v parameters", v.Len()))
}
for i, param := range dir.Params {
if err := unmarshalParam(param, v.Index(i)); err != nil {
return newUnmarshalParamError(dir, i, err)
}
}
default:
if len(dir.Params) != 1 {
return newUnmarshalDirectiveError(dir, "directive requires exactly one parameter")
}
if err := unmarshalParam(dir.Params[0], v); err != nil {
return newUnmarshalParamError(dir, 0, err)
}
}
return nil
}
func unmarshalParam(param string, v reflect.Value) error {
v = unwrapPointers(v)
t := v.Type()
// TODO: improve our logic following:
// https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/encoding/json/decode.go;drc=b9b8cecbfc72168ca03ad586cc2ed52b0e8db409;l=421
if v.CanAddr() {
if v, ok := v.Addr().Interface().(encoding.TextUnmarshaler); ok {
return v.UnmarshalText([]byte(param))
}
}
switch v.Kind() {
case reflect.String:
v.Set(reflect.ValueOf(param))
case reflect.Bool:
switch param {
case "true":
v.Set(reflect.ValueOf(true))
case "false":
v.Set(reflect.ValueOf(false))
default:
return fmt.Errorf("invalid bool parameter %q", param)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i, err := strconv.ParseInt(param, 10, t.Bits())
if err != nil {
return fmt.Errorf("invalid %v parameter: %v", t, err)
}
v.Set(reflect.ValueOf(i).Convert(t))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
u, err := strconv.ParseUint(param, 10, t.Bits())
if err != nil {
return fmt.Errorf("invalid %v parameter: %v", t, err)
}
v.Set(reflect.ValueOf(u).Convert(t))
case reflect.Float32, reflect.Float64:
f, err := strconv.ParseFloat(param, t.Bits())
if err != nil {
return fmt.Errorf("invalid %v parameter: %v", t, err)
}
v.Set(reflect.ValueOf(f).Convert(t))
default:
return fmt.Errorf("unsupported type for unmarshaling parameter: %v", t)
}
return nil
}
func unwrapPointers(v reflect.Value) reflect.Value {
for v.Kind() == reflect.Ptr {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
return v
}
func clearMap(v reflect.Value) {
for _, k := range v.MapKeys() {
v.SetMapIndex(k, reflect.Value{})
}
}
type unmarshalDirectiveError struct {
lineno int
name string
msg string
}
func newUnmarshalDirectiveError(dir *Directive, msg string) *unmarshalDirectiveError {
return &unmarshalDirectiveError{
name: dir.Name,
lineno: dir.lineno,
msg: msg,
}
}
func (err *unmarshalDirectiveError) Error() string {
return fmt.Sprintf("line %v, directive %q: %v", err.lineno, err.name, err.msg)
}
type unmarshalParamError struct {
lineno int
directive string
paramIndex int
err error
}
func newUnmarshalParamError(dir *Directive, paramIndex int, err error) *unmarshalParamError {
return &unmarshalParamError{
directive: dir.Name,
lineno: dir.lineno,
paramIndex: paramIndex,
err: err,
}
}
func (err *unmarshalParamError) Error() string {
return fmt.Sprintf("line %v, directive %q, parameter %v: %v", err.lineno, err.directive, err.paramIndex+1, err.err)
}
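// Illustrative usage sketch (not part of the original source): decoding an
// scfg document into a tagged struct, mirroring how config.Open decodes the
// forge configuration. Note that in this fork every tagged struct field must
// have a matching directive, so the example document provides both.
package main

import (
	"fmt"
	"strings"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/scfg"
)

type exampleConf struct {
	Web struct {
		Addr         string `scfg:"addr"`
		ReverseProxy bool   `scfg:"reverse_proxy"`
	} `scfg:"web"`
}

func main() {
	doc := "web {\n\taddr :8080\n\treverse_proxy true\n}\n"
	var c exampleConf
	dec := scfg.NewDecoder(strings.NewReader(doc))
	if err := dec.Decode(&c); err != nil {
		panic(err)
	}
	fmt.Println(c.Web.Addr, c.Web.ReverseProxy) // :8080 true
}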
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2020 Simon Ser <https://emersion.fr>
package scfg
import (
"errors"
"io"
"strings"
)
var errDirEmptyName = errors.New("scfg: directive with empty name")
// Write writes a parsed configuration to the provided io.Writer.
func Write(w io.Writer, blk Block) error {
enc := newEncoder(w)
err := enc.encodeBlock(blk)
return err
}
// encoder writes scfg directives to an output stream.
type encoder struct {
w io.Writer
lvl int
err error
}
// newEncoder returns a new encoder that writes to w.
func newEncoder(w io.Writer) *encoder {
return &encoder{w: w}
}
func (enc *encoder) push() {
enc.lvl++
}
func (enc *encoder) pop() {
enc.lvl--
}
func (enc *encoder) writeIndent() {
for i := 0; i < enc.lvl; i++ {
enc.write([]byte("\t"))
}
}
func (enc *encoder) write(p []byte) {
if enc.err != nil {
return
}
_, enc.err = enc.w.Write(p)
}
func (enc *encoder) encodeBlock(blk Block) error {
for _, dir := range blk {
if err := enc.encodeDir(*dir); err != nil {
return err
}
}
return enc.err
}
func (enc *encoder) encodeDir(dir Directive) error {
if enc.err != nil {
return enc.err
}
if dir.Name == "" {
enc.err = errDirEmptyName
return enc.err
}
enc.writeIndent()
enc.write([]byte(maybeQuote(dir.Name)))
for _, p := range dir.Params {
enc.write([]byte(" "))
enc.write([]byte(maybeQuote(p)))
}
if len(dir.Children) > 0 {
enc.write([]byte(" {\n"))
enc.push()
if err := enc.encodeBlock(dir.Children); err != nil {
return err
}
enc.pop()
enc.writeIndent()
enc.write([]byte("}"))
}
enc.write([]byte("\n"))
return enc.err
}
const specialChars = "\"\\\r\n'{} \t"
func maybeQuote(s string) string {
if s == "" || strings.ContainsAny(s, specialChars) {
var sb strings.Builder
sb.WriteByte('"')
for _, ch := range s {
if strings.ContainsRune(`"\`, ch) {
sb.WriteByte('\\')
}
sb.WriteRune(ch)
}
sb.WriteByte('"')
return sb.String()
}
return s
}
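// Illustrative usage sketch (not part of the original source): building a
// Block programmatically and writing it back out; values containing spaces
// are quoted by maybeQuote.
package main

import (
	"os"

	"go.lindenii.runxiyu.org/forge/forged/internal/common/scfg"
)

func main() {
	blk := scfg.Block{
		{
			Name: "general",
			Children: scfg.Block{
				{Name: "title", Params: []string{"My Forge"}},
			},
		},
	}
	// Output:
	// general {
	// 	title "My Forge"
	// }
	_ = scfg.Write(os.Stdout, blk)
}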
package server

import (
	"context"
	"fmt"
	"log"

	"go.lindenii.runxiyu.org/forge/forged/internal/config"
	"go.lindenii.runxiyu.org/forge/forged/internal/database"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/hooks"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/lmtp"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/ssh"
	"go.lindenii.runxiyu.org/forge/forged/internal/incoming/web"
)
type Server struct {
config config.Config
database database.Database
hookServer *hooks.Server
lmtpServer *lmtp.Server
webServer *web.Server
sshServer *ssh.Server
globalData struct {
SSHPubkey string
SSHFingerprint string
Version string
}
}
func New(ctx context.Context, configPath string) (server *Server, err error) {
server = &Server{}
server.config, err = config.Open(configPath)
if err != nil {
return server, fmt.Errorf("open config: %w", err)
}
// TODO: Should this belong here, or in Run()?
server.database, err = database.Open(ctx, server.config.DB)
if err != nil {
return server, fmt.Errorf("open database: %w", err)
}
server.hookServer = hooks.New(server.config.Hooks)
server.lmtpServer = lmtp.New(server.config.LMTP)
// TODO: Add HTTP and SSH servers
return server, nil
}
func (s *Server) Run() error {
// TODO: Not running git2d because it should be run separately.
// This needs to be documented somewhere, hence a TODO here for now.
go func() {
if err := s.hookServer.Run(); err != nil {
log.Fatalf("run hook server: %v", err)
}
}()
go func() {
if err := s.lmtpServer.Run(); err != nil {
log.Fatalf("run LMTP server: %v", err)
}
}()
return nil
}