-rw-r--r--   .gitignore           |   1
-rw-r--r--   backend.go           | 205
-rw-r--r--   go.mod               |  10
-rw-r--r--   go.sum               |   8
-rw-r--r--   iris/backend.go      | 448
-rw-r--r--   main.go              | 123
-rwxr-xr-x   script/mtin          |   3
-rwxr-xr-x   script/with-metanews |  24
-rw-r--r--   slog/backend.go      | 319
9 files changed, 1141 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..84ef209
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+metanews
diff --git a/backend.go b/backend.go
new file mode 100644
index 0000000..0291ffa
--- /dev/null
+++ b/backend.go
@@ -0,0 +1,205 @@
+package main
+
+import (
+	"errors"
+	"strconv"
+	"strings"
+
+	"github.com/dustin/go-nntp"
+	nntpserver "github.com/dustin/go-nntp/server"
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+)
+
+func NewMetaBackend(logger log.Logger, backends ...nntpserver.Backend) (nntpserver.Backend, error) {
+	mb := &metaBackend{logger: logger, groups: make(map[string]int)}
+	for _, b := range backends {
+		if err := mb.add(b); err != nil {
+			return nil, err
+		}
+	}
+
+	return mb, nil
+}
+
+type metaBackend struct {
+	logger   log.Logger
+	backends []nntpserver.Backend
+	groups   map[string]int
+}
+
+func (mb metaBackend) debug(keyvals ...any) error { return level.Debug(mb.logger).Log(keyvals...) }
+func (mb metaBackend) info(keyvals ...any) error  { return level.Info(mb.logger).Log(keyvals...) }
+func (mb metaBackend) warn(keyvals ...any) error  { return level.Warn(mb.logger).Log(keyvals...) }
+func (mb metaBackend) err(keyvals ...any) error   { return level.Error(mb.logger).Log(keyvals...) }
+
+func (mb *metaBackend) add(b nntpserver.Backend) error {
+	grps, err := b.ListGroups(1_000_000)
+	if err != nil {
+		return err
+	}
+
+	_ = mb.info("msg", "adding backend", "group_count", len(grps))
+
+	i := len(mb.backends)
+	mb.backends = append(mb.backends, b)
+	for _, grp := range grps {
+		mb.groups[grp.Name] = i
+	}
+
+	return nil
+}
+
+func (mb metaBackend) ListGroups(max int) ([]*nntp.Group, error) {
+	var groups []*nntp.Group
+	for _, b := range mb.backends {
+		grps, err := b.ListGroups(max - len(groups))
+		if err != nil {
+			return nil, err
+		}
+		groups = append(groups, grps...)
+
+		if len(groups) == max {
+			break
+		}
+	}
+
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "ListGroups",
+		"count", len(groups),
+	)
+	return groups, nil
+}
+
+func (mb metaBackend) GetGroup(name string) (*nntp.Group, error) {
+	b, err := mb.backendFor(name)
+	if err != nil {
+		return nil, err
+	}
+
+	grp, err := b.GetGroup(name)
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "GetGroup",
+		"name", name,
+		"err", err,
+	)
+	return grp, err
+}
+
+func (mb metaBackend) GetArticles(group *nntp.Group, from, to int64) ([]nntpserver.NumberedArticle, error) {
+	b, err := mb.backendFor(group.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	articles, err := b.GetArticles(group, from, to)
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "GetArticles",
+		"group", group.Name,
+		"from-to", strconv.Itoa(int(from))+"-"+strconv.Itoa(int(to)),
+		"count", len(articles),
+		"err", err,
+	)
+	return articles, err
+}
+
+func (mb metaBackend) GetArticle(group *nntp.Group, messageID string) (*nntp.Article, error) {
+	b, err := mb.backendFor(group.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	article, err := b.GetArticle(group, messageID)
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "GetArticle",
+		"group", group.Name,
+		"messageID", messageID,
+		"err", err,
+	)
+	return article, err
+}
+
+func (mb metaBackend) Post(article *nntp.Article) error {
+	groupNames := strings.Split(article.Header.Get("Newsgroups"), ",")
+	for i := range groupNames {
+		groupNames[i] = strings.Trim(groupNames[i], " ")
+	}
+
+	bes, err := mb.backendsFor(groupNames)
+	if err != nil {
+		return err
+	}
+
+	var errs []error
+	for _, b := range bes {
+		// TODO: need to filter the "Newsgroups" header to only the
+		// groups relevant to each backend?
+		errs = append(errs, b.Post(article))
+	}
+
+	err = errors.Join(errs...)
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "Post",
+		"groups", article.Header.Get("Newsgroups"),
+		"backends", len(bes),
+		"err", err,
+	)
+	return err
+}
+
+func (mb metaBackend) Authorized() bool {
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "Authorized",
+	)
+	return true
+}
+
+func (mb metaBackend) AllowPost() bool {
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "AllowPost",
+	)
+	return true
+}
+
+func (mb metaBackend) Authenticate(user, _ string) (nntpserver.Backend, error) {
+	_ = mb.info(
+		"msg", "metaBackend method",
+		"method", "Authenticate",
+		"user", user,
+	)
+	return nil, nil
+}
+
+func (mb metaBackend) backendFor(name string) (nntpserver.Backend, error) {
+	i, ok := mb.groups[name]
+	if !ok {
+		return nil, nntpserver.ErrNoSuchGroup
+	}
+	return mb.backends[i], nil
+}
+
+func (mb metaBackend) backendsFor(names []string) ([]nntpserver.Backend, error) {
+	tbl := make([]bool, len(mb.backends))
+	for _, name := range names {
+		i, ok := mb.groups[name]
+		if !ok {
+			return nil, nntpserver.ErrNoSuchGroup
+		}
+		tbl[i] = true
+	}
+
+	backends := make([]nntpserver.Backend, 0, len(mb.backends))
+	for i, y := range tbl {
+		if y {
+			backends = append(backends, mb.backends[i])
+		}
+	}
+	return backends, nil
+}
diff --git a/go.mod b/go.mod
new file mode 100644
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,10 @@
+module tildegit.org/tjp/metanews
+
+go 1.20
+
+require (
+	github.com/dustin/go-nntp v0.0.0-20210723005859-f00d51cf8cc1
+	github.com/go-kit/log v0.2.1
+)
+
+require github.com/go-logfmt/logfmt v0.5.1 // indirect
diff --git a/go.sum b/go.sum
new file mode 100644
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,8 @@
+github.com/dustin/go-couch v0.0.0-20160816170231-8251128dab73/go.mod h1:WG/TWzFd/MRvOZ4jjna3FQ+K8AKhb2jOw4S2JMw9VKI=
+github.com/dustin/go-nntp v0.0.0-20210723005859-f00d51cf8cc1 h1:R90ND7acg9HKYj3oJBKKefk73DULdC7IlcnS7MV0X1s=
+github.com/dustin/go-nntp v0.0.0-20210723005859-f00d51cf8cc1/go.mod h1:elGbp3dKCIIdwu6jm3y6L93EVn+I6MSzYrcZXhpNS3Y=
+github.com/dustin/httputil v0.0.0-20170305193905-c47743f54f89/go.mod h1:ZoDWdnxro8Kesk3zrCNOHNFWtajFPSnDMjVEjGjQu/0=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
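A sketch, not part of this commit: backend.go only relies on the handful of methods it forwards, so any additional source can be folded in the same way as iris and slog. The stub below shows that minimal surface; nullBackend and its group name are invented for illustration, and the go-nntp types are assumed to be exactly as they are used in backend.go above.

package main

import (
	"errors"

	"github.com/dustin/go-nntp"
	nntpserver "github.com/dustin/go-nntp/server"
)

// nullBackend is a placeholder source: it serves one empty group and
// rejects posts. The group name is made up for this sketch.
type nullBackend struct{}

var nullGroup = &nntp.Group{
	Name:        "ctrl-c.null",
	Description: "An empty placeholder group",
	Posting:     nntp.PostingPermitted,
	Low:         1,
}

func (nullBackend) ListGroups(max int) ([]*nntp.Group, error) {
	return []*nntp.Group{nullGroup}, nil
}

func (nullBackend) GetGroup(name string) (*nntp.Group, error) {
	if name != nullGroup.Name {
		return nil, nntpserver.ErrNoSuchGroup
	}
	return nullGroup, nil
}

func (nullBackend) GetArticles(_ *nntp.Group, _, _ int64) ([]nntpserver.NumberedArticle, error) {
	return nil, nil
}

func (nullBackend) GetArticle(_ *nntp.Group, _ string) (*nntp.Article, error) {
	return nil, nntpserver.ErrInvalidMessageID
}

func (nullBackend) Post(*nntp.Article) error { return errors.New("read-only backend") }

func (nullBackend) Authorized() bool { return true }
func (nullBackend) AllowPost() bool  { return false }

func (nullBackend) Authenticate(_, _ string) (nntpserver.Backend, error) { return nil, nil }

// wiring it in would just be one more argument:
//   backend, err := NewMetaBackend(logger, ib, sb, nullBackend{})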
diff --git a/iris/backend.go b/iris/backend.go
new file mode 100644
index 0000000..787db96
--- /dev/null
+++ b/iris/backend.go
@@ -0,0 +1,448 @@
+package iris
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/mail"
+	"net/textproto"
+	"os"
+	"os/exec"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/dustin/go-nntp"
+	nntpserver "github.com/dustin/go-nntp/server"
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+)
+
+var group = &nntp.Group{
+	Name:        "ctrl-c.iris",
+	Description: "The iris message board",
+	Posting:     nntp.PostingPermitted,
+	Low:         1,
+}
+
+const DefaultWaitTime = 30 * time.Second
+const msgfile = ".iris.messages"
+
+// NewBackend builds an iris nntp backend.
+//
+// The provided waitTime may be <= 0, in which case DefaultWaitTime will be used.
+func NewBackend(logger log.Logger, waitTime time.Duration) (nntpserver.Backend, error) {
+	if waitTime <= 0 {
+		waitTime = DefaultWaitTime
+	}
+
+	b := &backend{logger: logger, waitTime: waitTime}
+	if err := b.refresh(); err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+type backend struct {
+	logger   log.Logger
+	waitTime time.Duration
+	lastRead time.Time
+	messages []*nntp.Article
+}
+
+func (b backend) debug(keyvals ...any) error { return level.Debug(b.logger).Log(keyvals...) }
+func (b backend) info(keyvals ...any) error  { return level.Info(b.logger).Log(keyvals...) }
+func (b backend) warn(keyvals ...any) error  { return level.Warn(b.logger).Log(keyvals...) }
+func (b backend) err(keyvals ...any) error   { return level.Error(b.logger).Log(keyvals...) }
+
+func (b backend) ListGroups(max int) ([]*nntp.Group, error) {
+	return []*nntp.Group{group}, nil
+}
+
+func (b *backend) GetGroup(name string) (*nntp.Group, error) {
+	if name != group.Name {
+		return nil, nntpserver.ErrNoSuchGroup
+	}
+	if err := b.refresh(); err != nil {
+		return nil, err
+	}
+
+	return group, nil
+}
+
+func (b *backend) GetArticles(_ *nntp.Group, from, to int64) ([]nntpserver.NumberedArticle, error) {
+	if err := b.refresh(); err != nil {
+		return nil, err
+	}
+
+	numbered := make([]nntpserver.NumberedArticle, 0, len(b.messages))
+	for i, msg := range b.messages {
+		num := int64(i + 1)
+		if num >= from && num <= to {
+			numbered = append(numbered, nntpserver.NumberedArticle{
+				Num:     num,
+				Article: copyArticle(msg),
+			})
+		}
+	}
+
+	return numbered, nil
+}
+
+func (b *backend) GetArticle(_ *nntp.Group, messageID string) (*nntp.Article, error) {
+	if err := b.refresh(); err != nil {
+		return nil, err
+	}
+
+	for _, msg := range b.messages {
+		if msg.Header.Get("Message-Id") == messageID {
+			return copyArticle(msg), nil
+		}
+	}
+
+	num, err := strconv.Atoi(messageID)
+	if err == nil && num >= 1 && num <= len(b.messages) {
+		return copyArticle(b.messages[num-1]), nil
+	}
+
+	return nil, nntpserver.ErrInvalidMessageID
+}
+
+func (b backend) Post(article *nntp.Article) error {
+	// iris replies are all made to a top-level post, there is no grandchild nesting.
+	//
+	// but NNTP supports this, so collapse the provided "References" header up to the OP.
+	parent := b.findOP(article.Header.Get("References"))
+	if parent != nil {
+		article.Header.Set("References", parent.MessageID())
+	}
+
+	msg, err := msgToIris(article)
+	if err != nil {
+		return err
+	}
+	return appendMessage(msg)
+}
+
+func (b backend) Authorized() bool { return true }
+func (b backend) AllowPost() bool  { return true }
+func (b backend) Authenticate(_, _ string) (nntpserver.Backend, error) { return nil, nil }
+
+func (b backend) findOP(ref string) *nntp.Article {
+	if ref == "" {
+		return nil
+	}
+	// all references should have the same OP so just take the first
+	msgID := strings.SplitN(ref, " ", 2)[0]
+
+	// traverse backwards expecting most reply activity concentrated late in the total history
+	for i := len(b.messages) - 1; i >= 0; i-- {
+		article := b.messages[i]
+		if article.MessageID() != msgID {
+			continue
+		}
+
+		gpID := article.Header.Get("References")
+		if gpID != "" {
+			return b.findOP(gpID)
+		}
+		return article
+	}
+
+	return nil
+}
+
+func (b *backend) refresh() error {
+	now := time.Now()
+	if b.lastRead.IsZero() || now.Sub(b.lastRead) > b.waitTime {
+		b.lastRead = now
+	} else {
+		return nil
+	}
+
+	binpath, err := exec.LookPath("iris")
+	if err != nil {
+		return err
+	}
+	cmd := exec.Command(binpath, "-d")
+	buf := &bytes.Buffer{}
+	cmd.Stdout = buf
+
+	if err := cmd.Run(); err != nil {
+		return err
+	}
+
+	msgs := irisDump{}
+	if err := json.NewDecoder(buf).Decode(&msgs); err != nil {
+		return err
+	}
+
+	b.messages, err = msgs.Articles()
+	if err != nil {
+		return err
+	}
+	group.High = int64(len(b.messages))
+	group.Count = int64(len(b.messages))
+	return nil
+}
+
+func copyArticle(article *nntp.Article) *nntp.Article {
+	out := *article
+	out.Body = bytes.NewBuffer(article.Body.(*bytes.Buffer).Bytes())
+	return &out
+}
+
+type irisMsg struct {
+	Hash      string  `json:"hash"`
+	EditHash  *string `json:"edit_hash"`
+	IsDeleted *bool   `json:"is_deleted"`
+	Data      struct {
+		Author    string  `json:"author"`
+		Parent    *string `json:"parent"`
+		Timestamp string  `json:"timestamp"`
+		Message   string  `json:"message"`
+	} `json:"data"`
+}
+
+func (m irisMsg) calcHash() (string, error) {
+	/*
+		Careful coding here to match ruby's hash calculation:
+		```
+		Base64.encode64(Digest::SHA1.digest(m["data"].to_json))
+		```
+
+		* have to use an encoder rather than json.Marshal so we can
+		  turn off the default HTML escaping (ruby doesn't do this)
+		* strip trailing newline from JSON encoding output
+		* add a trailing newline to base64 encoded form
+	*/
+
+	b := &bytes.Buffer{}
+	enc := json.NewEncoder(b)
+	enc.SetEscapeHTML(false)
+	if err := enc.Encode(m.Data); err != nil {
+		return "", err
+	}
+
+	arr := sha1.Sum(bytes.TrimSuffix(b.Bytes(), []byte("\n")))
+	s := base64.StdEncoding.EncodeToString(arr[:])
+	if !strings.HasSuffix(s, "\n") {
+		s += "\n"
+	}
+	return s, nil
+}
+
+func msgToIris(article *nntp.Article) (*irisMsg, error) {
+	postTime := time.Now().UTC().Format(time.RFC3339)
+
+	body, err := io.ReadAll(article.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var msg irisMsg
+	msg.Data.Author = irisAuthor(article.Header.Get("From"))
+	msg.Data.Timestamp = postTime
+	msg.Data.Message = string(body)
+	refs := article.Header.Get("References")
+	if refs != "" {
+		spl := strings.SplitN(refs, " ", 2)
+		ref := fromMsgID(spl[0])
+		msg.Data.Parent = &ref
+	}
+
+	hash, err := msg.calcHash()
+	if err != nil {
+		return nil, err
+	}
+	msg.Hash = hash
+
+	return &msg, nil
+}
+
+func irisAuthor(nntpAuthor string) string {
+	addr, err := mail.ParseAddress(nntpAuthor)
+	if err != nil {
+		return nntpAuthor
+	}
+
+	return addr.Address
+}
+
+type irisDump []irisMsg
+
+func (dump irisDump) Articles() ([]*nntp.Article, error) {
+	// calculate the article replacements due to edits
+	//
+	// note: this is only a single "hop", and because there can be edits-of-edits
+	// and edits-of-edits-of-edits, we must actually resolve replacements with a loop.
+	//
+	// we need to keep all the hops though because there could have been replies to
+	// the original or to any intermediate edits.
+	replacements := make(map[string]string)
+	for _, msg := range dump {
+		if msg.EditHash != nil {
+			replacements[*msg.EditHash] = msg.Hash
+		}
+	}
+
+	articles := make([]*nntp.Article, 0, len(dump)-len(replacements))
+
+	// index iris hashes -> nntp Articles for reference lookups
+	idx := make(map[string]*nntp.Article)
+
+	sort.SliceStable(dump, func(i, j int) bool {
+		return dump[i].Data.Timestamp < dump[j].Data.Timestamp
+	})
+
+outer:
+	for _, msg := range dump {
+		if _, ok := replacements[msg.Hash]; ok {
+			continue
+		}
+		if msg.EditHash != nil && *msg.EditHash == msg.Hash {
+			continue
+		}
+
+		msgID := msgIDFor(&msg)
+		ts, err := time.Parse(time.RFC3339, msg.Data.Timestamp)
+		if err != nil {
+			return nil, err
+		}
+
+		article := &nntp.Article{
+			Header: textproto.MIMEHeader{
+				"Message-Id": []string{msgID},
+				"From":       []string{msg.Data.Author},
+				"Newsgroups": []string{group.Name},
+				"Date":       []string{ts.Format(time.RFC1123Z)},
+			},
+		}
+
+		if msg.IsDeleted != nil && *msg.IsDeleted {
+			article.Header.Set("Subject", "**TOPIC DELETED BY AUTHOR**")
+			article.Body = &bytes.Buffer{}
+			article.Bytes = 0
+			article.Lines = 0
+		} else {
+			article.Body = bytes.NewBufferString(msg.Data.Message)
+			article.Bytes = len(msg.Data.Message)
+			article.Lines = strings.Count(msg.Data.Message, "\n")
+
+			if msg.Data.Parent == nil {
+				article.Header.Set("Subject", strings.SplitN(msg.Data.Message, "\n", 2)[0])
+			} else {
+				parentHash := *msg.Data.Parent
+				for {
+					if p, ok := replacements[parentHash]; ok {
+						if parentHash == p {
+							continue outer
+						}
+						parentHash = p
+					} else {
+						break
+					}
+				}
+				msg.Data.Parent = &parentHash
+				parent, ok := idx[parentHash]
+				if !ok {
+					continue
+				}
+				parentSubj := strings.TrimPrefix(parent.Header.Get("Subject"), "Re: ")
+				article.Header.Set("Subject", "Re: "+parentSubj)
+			}
+		}
+
+		if msg.Data.Parent != nil {
+			parent := idx[*msg.Data.Parent]
+			if parent == nil {
+				continue
+			}
+			parentRefs := parent.Header.Get("References")
+			if parentRefs != "" {
+				article.Header.Set("References", parentRefs)
+			} else {
+				article.Header.Set("References", parent.MessageID())
+			}
+		}
+
+		articles = append(articles, article)
+		idx[msg.Hash] = article
+	}
+
+	return articles, nil
+}
+
+func (id irisDump) MarshalJSON() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := json.NewEncoder(buf)
+	enc.SetEscapeHTML(false)
+
+	out := bytes.NewBufferString("[\n ")
+
+	for i, msg := range id {
+		if err := enc.Encode(msg); err != nil {
+			return nil, err
+		}
+
+		if i > 0 {
+			_, _ = out.WriteString(",\n ")
+		}
+		_, _ = out.Write(bytes.TrimSuffix(buf.Bytes(), []byte("\n")))
+		buf.Reset()
+	}
+	_, _ = out.WriteString("\n]")
+
+	return out.Bytes(), nil
+}
+
+func msgIDFor(msg *irisMsg) string {
+	return fmt.Sprintf("<%s.%s>",
+		strings.TrimSuffix(msg.Hash, "=\n"),
+		msg.Data.Author,
+	)
+}
+
+func fromMsgID(nntpID string) string {
+	hash, _, _ := strings.Cut(strings.TrimSuffix(strings.TrimPrefix(nntpID, "<"), ">"), ".")
+	return hash + "=\n"
+}
+
+func appendMessage(msg *irisMsg) error {
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return err
+	}
+
+	msgFile, err := os.Open(path.Join(home, msgfile))
+	if err != nil {
+		return err
+	}
+
+	var msgs irisDump
+	if err := json.NewDecoder(msgFile).Decode(&msgs); err != nil {
+		_ = msgFile.Close()
+		return err
+	}
+	_ = msgFile.Close()
+	msgs = append(msgs, *msg)
+
+	msgFile, err = os.Create(path.Join(home, msgfile))
+	if err != nil {
+		return err
+	}
+	defer func() { _ = msgFile.Close() }()
+
+	out, err := msgs.MarshalJSON()
+	if err != nil {
+		return err
+	}
+	_, err = msgFile.Write(out)
+	return err
+}
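A sketch, not part of this commit, of the Message-Id round trip above: msgIDFor strips the trailing "=\n" from the base64 SHA-1 hash and pairs it with the author, and fromMsgID restores the stored form. The hash and author values are invented; the test would live in package iris since it touches unexported helpers.

package iris

import "testing"

func TestMessageIDRoundTrip(t *testing.T) {
	var msg irisMsg
	// invented hash with the shape calcHash produces: 28 base64 characters
	// plus the trailing newline iris stores
	msg.Hash = "2jmj7l5rSw0yVb/vlWAYkK/YBwk=\n"
	msg.Data.Author = "someuser"

	id := msgIDFor(&msg) // "<2jmj7l5rSw0yVb/vlWAYkK/YBwk.someuser>"
	if got := fromMsgID(id); got != msg.Hash {
		t.Fatalf("round trip changed the hash: %q != %q", got, msg.Hash)
	}
}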
diff --git a/main.go b/main.go
new file mode 100644
--- /dev/null
+++ b/main.go
@@ -0,0 +1,123 @@
+package main
+
+import (
+	"fmt"
+	stdlog "log"
+	"net"
+	"os"
+	"path"
+	"time"
+
+	nntpserver "github.com/dustin/go-nntp/server"
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/go-kit/log/term"
+
+	"tildegit.org/tjp/metanews/iris"
+	"tildegit.org/tjp/metanews/slog"
+)
+
+func main() {
+	logger := setupLogging()
+
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		fatal(logger, "network listen failed", err)
+	}
+	_ = level.Info(logger).Log(
+		"msg", "listening",
+		"addr", l.Addr().String(),
+	)
+
+	writeLocation(l.Addr().String(), logger)
+
+	ib, err := iris.NewBackend(logger, 0)
+	if err != nil {
+		fatal(logger, "error starting iris backend", err)
+	}
+
+	sb, err := slog.NewBackend(logger, 0)
+	if err != nil {
+		fatal(logger, "error starting slog backend", err)
+	}
+
+	backend, err := NewMetaBackend(logger, ib, sb)
+	if err != nil {
+		fatal(logger, "creating meta backend failed", err)
+	}
+
+	server := nntpserver.NewServer(backend)
+
+	c, err := l.Accept()
+	if err != nil {
+		fatal(logger, "accepting network connection failed", err)
+	}
+	_ = level.Info(logger).Log(
+		"msg", "accepted client connection",
+		"remote-addr", c.RemoteAddr().String(),
+	)
+
+	server.Process(c)
+}
+
+func setupLogging() log.Logger {
+	base := term.NewLogger(os.Stdout, log.NewLogfmtLogger, func(keyvals ...any) term.FgBgColor {
+		for i := 0; i < len(keyvals)-1; i += 2 {
+			if keyvals[i] != "level" {
+				continue
+			}
+
+			switch keyvals[i+1] {
+			case level.DebugValue():
+				return term.FgBgColor{Fg: term.Gray}
+			case level.InfoValue():
+				return term.FgBgColor{Fg: term.Green}
+			case level.WarnValue():
+				return term.FgBgColor{Fg: term.Yellow}
+			case level.ErrorValue():
+				return term.FgBgColor{Fg: term.Red}
+			}
+		}
+
+		return term.FgBgColor{}
+	})
+	base = log.NewSyncLogger(base)
+	base = log.With(base, "ts", log.Valuer(func() any {
+		return time.Now().UTC().Format(time.DateTime)
+	}))
+
+	// go-nntp is noisy on the stdlib log pkg
+	stdlibLogger := level.Debug(log.With(base, "src", "go-nntp"))
+	stdlog.SetOutput(log.NewStdlibAdapter(stdlibLogger))
+
+	return base
+}
+
+func writeLocation(location string, logger log.Logger) {
+	home, err := os.UserHomeDir()
+	if err != nil {
+		fatal(logger, "finding user home dir failed", err)
+	}
+
+	filepath := path.Join(home, ".metanews-server")
+	f, err := os.Create(filepath)
+	if err != nil {
+		fatal(logger, "creating server location file failed", err)
+	}
+	defer func() { _ = f.Close() }()
+
+	if _, err := fmt.Fprintln(f, location); err != nil {
+		fatal(logger, "failed writing to location file", err)
+	}
+
+	_ = level.Info(logger).Log(
+		"msg", "wrote address to location file",
+		"address", location,
+		"file", filepath,
+	)
+}
+
+func fatal(logger log.Logger, msg string, err error) {
+	_ = level.Error(logger).Log("msg", msg, "err", err)
+	os.Exit(1)
+}
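A sketch, not part of this commit, of the client side of the handshake: writeLocation publishes "host:port" in ~/.metanews-server, and anything that wants to reach the server just reads it back. dialMetanews is an invented helper name; the shell wrapper below does the same thing with NNTPSERVER and NNTPPORT.

package main

import (
	"net"
	"os"
	"path"
	"strings"
)

// dialMetanews (invented name) locates the running server via the file
// written by writeLocation and opens a TCP connection to it.
func dialMetanews() (net.Conn, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return nil, err
	}

	raw, err := os.ReadFile(path.Join(home, ".metanews-server"))
	if err != nil {
		return nil, err
	}

	// writeLocation stores "host:port" followed by a newline
	addr := strings.TrimSpace(string(raw))
	return net.Dial("tcp", addr)
}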
diff --git a/script/mtin b/script/mtin
new file mode 100755
index 0000000..3c4883d
--- /dev/null
+++ b/script/mtin
@@ -0,0 +1,3 @@
+#!/usr/bin/env sh
+
+exec $(dirname $0)/with-metanews rtin
diff --git a/script/with-metanews b/script/with-metanews
new file mode 100755
index 0000000..7d1e8cc
--- /dev/null
+++ b/script/with-metanews
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+if [[ -z "$METANEWS_LOG" ]]; then
+    METANEWS_LOG="/dev/null"
+fi
+
+$(dirname $0)/metanews >>"$METANEWS_LOG" 2>/dev/null &
+serverpid="$!"
+
+while [[ ! -e ~/.metanews-server ]]; do
+    sleep 0.1
+done
+addr="$(cat ~/.metanews-server)"
+
+function cleanup {
+    rm ~/.metanews-server
+    tail --pid $serverpid -f /dev/null
+}
+trap cleanup EXIT
+
+env \
+    NNTPSERVER="${addr%%:*}" \
+    NNTPPORT="${addr##*:}" \
+    "$@"
diff --git a/slog/backend.go b/slog/backend.go
new file mode 100644
index 0000000..613601c
--- /dev/null
+++ b/slog/backend.go
@@ -0,0 +1,319 @@
+package slog
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/fs"
+	"net/textproto"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/dustin/go-nntp"
+	nntpserver "github.com/dustin/go-nntp/server"
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+)
+
+var group = &nntp.Group{
+	Name:        "ctrl-c.slog",
+	Description: "The slog local blogging platform",
+	Posting:     nntp.PostingPermitted,
+	Low:         1,
+}
+
+const DefaultWaitTime = 30 * time.Second
+
+// NewBackend builds a slog nntp backend.
+//
+// The provided waitTime may be <= 0, in which case DefaultWaitTime will be used.
+func NewBackend(logger log.Logger, waitTime time.Duration) (nntpserver.Backend, error) {
+	if waitTime <= 0 {
+		waitTime = DefaultWaitTime
+	}
+
+	b := &backend{logger: logger, waitTime: waitTime, index: make([]indexEntry, 0)}
+	if err := b.refreshIndex(); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+type backend struct {
+	logger   log.Logger
+	waitTime time.Duration
+	lastRead time.Time
+	index    []indexEntry
+}
+
+func (b backend) debug(keyvals ...any) error { return level.Debug(b.logger).Log(keyvals...) }
+func (b backend) info(keyvals ...any) error  { return level.Info(b.logger).Log(keyvals...) }
+func (b backend) warn(keyvals ...any) error  { return level.Warn(b.logger).Log(keyvals...) }
+func (b backend) err(keyvals ...any) error   { return level.Error(b.logger).Log(keyvals...) }
+
+func (b backend) ListGroups(max int) ([]*nntp.Group, error) {
+	return []*nntp.Group{group}, nil
+}
+
+func (b *backend) GetGroup(name string) (*nntp.Group, error) {
+	if name != group.Name {
+		return nil, nntpserver.ErrNoSuchGroup
+	}
+	if err := b.refreshIndex(); err != nil {
+		return nil, err
+	}
+
+	return group, nil
+}
+
+func (b *backend) GetArticles(_ *nntp.Group, from, to int64) ([]nntpserver.NumberedArticle, error) {
+	if err := b.refreshIndex(); err != nil {
+		return nil, err
+	}
+
+	numbered := make([]nntpserver.NumberedArticle, 0, len(b.index))
+	for i := range b.index {
+		entry := b.index[i]
+		num := int64(i + 1)
+		if num >= from && num <= to {
+			article, err := makeArticle(entry)
+			if err != nil {
+				return nil, err
+			}
+
+			numbered = append(numbered, nntpserver.NumberedArticle{
+				Num:     num,
+				Article: article,
+			})
+		}
+	}
+
+	return numbered, nil
+}
+
+func (b *backend) GetArticle(_ *nntp.Group, messageID string) (*nntp.Article, error) {
+	if err := b.refreshIndex(); err != nil {
+		return nil, err
+	}
+
+	for i := range b.index {
+		entry := b.index[i]
+		if entry.messageID() == messageID {
+			return makeArticle(entry)
+		}
+	}
+
+	num, err := strconv.Atoi(messageID)
+	if err == nil && num >= 1 && num <= len(b.index) {
+		return makeArticle(b.index[num-1])
+	}
+
+	return nil, nntpserver.ErrInvalidMessageID
+}
+
+func (b backend) Post(article *nntp.Article) error {
+	indexFile, err := os.Open(path.Join(os.Getenv("HOME"), ".slog", "index"))
+	if err != nil {
+		return err
+	}
+
+	entries, err := parseIndexFile(indexFile)
+	if err != nil {
+		return err
+	}
+
+	postID, err := newPostID()
+	if err != nil {
+		return err
+	}
+
+	entries = append(entries, indexEntry{
+		id:    postID,
+		ts:    time.Now(),
+		title: article.Header.Get("Subject"),
+	})
+
+	file, err := os.Create(path.Join(os.Getenv("HOME"), ".slog", "posts", postID))
+	if err != nil {
+		return err
+	}
+	defer func() { _ = file.Close() }()
+
+	_, err = io.Copy(file, article.Body)
+	if err != nil {
+		return err
+	}
+
+	return writeIndexFile(entries)
+}
+
+func (b backend) Authorized() bool { return true }
+func (b backend) AllowPost() bool  { return true }
+func (b backend) Authenticate(_, _ string) (nntpserver.Backend, error) { return nil, nil }
+
+type indexEntry struct {
+	id     string
+	ts     time.Time
+	title  string
+	user   string
+	author string
+}
+
+const indexTimeFmt = "2006-01-02 15:04:05.999999"
+
+func (ie *indexEntry) UnmarshalJSON(b []byte) error {
+	var tgt struct {
+		Timestamp string
+		Id        string
+		Title     string
+	}
+	if err := json.Unmarshal(b, &tgt); err != nil {
+		return err
+	}
+
+	ts, err := time.Parse(indexTimeFmt, tgt.Timestamp)
+	if err != nil {
+		return err
+	}
+
+	ie.id = tgt.Id
+	ie.ts = ts
+	ie.title = tgt.Title
+	return nil
+}
+
+func (ie *indexEntry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"id":        ie.id,
+		"timestamp": ie.ts.Format(indexTimeFmt),
+		"title":     ie.title,
+	})
+}
+
+func (ie indexEntry) messageID() string {
+	return fmt.Sprintf("<%s.%s>", ie.id, ie.author)
+}
+
+func (b *backend) refreshIndex() error {
+	now := time.Now()
+	if b.lastRead.IsZero() || now.Sub(b.lastRead) > b.waitTime {
+		b.lastRead = now
+	} else {
+		return nil
+	}
+
+	fsys := os.DirFS("/home")
+	indices, err := fs.Glob(fsys, "*/.slog/index")
+	if err != nil {
+		return err
+	}
+
+	b.index = b.index[:0]
+	for _, index := range indices {
+		username := strings.SplitN(index, "/", 2)[0]
+
+		file, err := fsys.Open(index)
+		if err != nil {
+			_ = b.warn(
+				"msg", "error opening index file",
+				"user", username,
+				"err", err,
+			)
+			continue
+		}
+
+		items, err := parseIndexFile(file)
+		if err != nil {
+			_ = b.warn(
+				"msg", "error parsing index file",
+				"user", username,
+				"err", err,
+			)
+			continue
+		}
+		for i := range items {
+			items[i].user = username
+			items[i].author = username + "@ctrl-c.club"
+		}
+		b.index = append(b.index, items...)
+	}
+
+	sort.Slice(b.index, func(i, j int) bool {
+		return b.index[i].ts.Before(b.index[j].ts)
+	})
+
+	group.High = int64(len(b.index))
+	group.Count = group.High
+
+	return nil
+}
+
+func myIndexPath() string {
+	return path.Join(os.Getenv("HOME"), ".slog", "index")
+}
+
+func parseIndexFile(file fs.File) ([]indexEntry, error) {
+	defer func() { _ = file.Close() }()
+
+	var entries []indexEntry
+	if err := json.NewDecoder(file).Decode(&entries); err != nil {
+		return nil, err
+	}
+	return entries, nil
+}
+
+func writeIndexFile(entries []indexEntry) error {
+	file, err := os.Create(myIndexPath())
+	if err != nil {
+		return err
+	}
+	defer func() { _ = file.Close() }()
+
+	return json.NewEncoder(file).Encode(entries)
+}
+
+func makeArticle(entry indexEntry) (*nntp.Article, error) {
+	f, err := os.Open(fmt.Sprintf("/home/%s/.slog/posts/%s", entry.user, entry.id))
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = f.Close() }()
+
+	body := &bytes.Buffer{}
+	size, err := io.Copy(body, f)
+	if err != nil {
+		return nil, err
+	}
+	lines := bytes.Count(body.Bytes(), []byte{'\n'})
+
+	article := &nntp.Article{
+		Header: textproto.MIMEHeader{
+			"Message-Id": []string{entry.messageID()},
+			"From":       []string{entry.author},
+			"Newsgroups": []string{group.Name},
+			"Date":       []string{entry.ts.Format(time.RFC1123Z)},
+			"Subject":    []string{entry.title},
+		},
+		Body:  body,
+		Bytes: int(size),
+		Lines: lines,
+	}
+
+	return article, nil
+}
+
+func newPostID() (string, error) {
+	buf := make([]byte, 5)
+	_, err := rand.Read(buf)
+	if err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(buf), nil
+}
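A sketch, not part of this commit, of the on-disk format refreshIndex and parseIndexFile expect: each user's ~/.slog/index is a JSON array of {id, timestamp, title} objects with timestamps in the indexTimeFmt layout. The sample values are invented; the test would live in package slog since indexEntry's fields are unexported.

package slog

import (
	"encoding/json"
	"testing"
)

func TestIndexEntryDecode(t *testing.T) {
	// invented example entry; ids from newPostID are 10 hex characters
	raw := `[{"id":"0a1b2c3d4e","timestamp":"2023-01-02 15:04:05.123456","title":"hello slog"}]`

	var entries []indexEntry
	if err := json.Unmarshal([]byte(raw), &entries); err != nil {
		t.Fatal(err)
	}

	if entries[0].id != "0a1b2c3d4e" || entries[0].title != "hello slog" {
		t.Fatalf("unexpected entry: %+v", entries[0])
	}
}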