108 Commits

Author SHA1 Message Date
Jon Staab fefc85d500 Add unbanpubkey and unallowpubkey 2026-05-05 11:36:42 -07:00
Jon Staab fbc805a5a6 Switch supported_nips to strings 2026-05-05 11:36:42 -07:00
fiatjaf a5aeff31d7 khatru: cancel existing subscription when a new one starts with the same id. 2026-05-04 19:26:24 -03:00
fiatjaf bf7998e780 khatru: OnListenerRemoved wasn't being called in the most common case of a connection dropped. 2026-05-04 13:13:02 -03:00
fiatjaf 61586d5d1b khatru: ForceSetAuthed() 2026-05-04 11:50:21 -03:00
fiatjaf c75bd45d13 Tags.Eq() 2026-05-04 11:50:21 -03:00
fiatjaf aafff41d40 mmm: rawread stats. 2026-05-03 13:48:19 -03:00
fiatjaf cbf335a8fa schema: dangling space is not a problem in content. 2026-05-03 13:47:23 -03:00
fiatjaf 05b426e67e khatru: add and remove listener hooks. 2026-04-29 19:32:47 -03:00
fiatjaf 744fb0702c relay.AssumeValid can be passed as an option, so it works from a Pool. 2026-04-23 22:16:47 -03:00
fiatjaf b899ef8865 faster signature verification by serializing directly into the sha with less allocations. 2026-04-23 22:16:46 -03:00
fiatjaf 696f377109 event verification benchmark. 2026-04-23 22:16:23 -03:00
fiatjaf e144b33fa2 khatru: use sync.Pool to minimize allocations of sets on dispatcher. 2026-04-23 20:14:18 -03:00
fiatjaf 42379e53a2 sdk: get rid of unused error returns in wot. 2026-04-23 08:25:55 -03:00
fiatjaf e2ad68d050 khatru: we haven't fixed the nil ws bug on dispatcher, but at least now we have more tests and an even more efficient architecture! 2026-04-22 23:16:43 -03:00
fiatjaf 223d95461f blossom/nsite tweaks. 2026-04-22 15:52:50 -03:00
fiatjaf 078ee94465 sdk: FetchBlossomServerList(). 2026-04-22 15:16:46 -03:00
fiatjaf a21ea55eaa nip5A: nsites. 2026-04-22 15:08:01 -03:00
fiatjaf 5b28d08e47 khatru: add tests and fix dispatcher. 2026-04-21 21:20:40 -03:00
fiatjaf 94ea432818 delete PoolOptions entirely (it should have been deleted earlier) and expose AuthRequiredHandler field. 2026-04-19 20:14:00 -03:00
fiatjaf 8200164174 don't print identifier in replaceable pointers when all is ok. 2026-04-18 15:02:58 -03:00
fiatjaf f50b7b0f8d khatru: list clients and client details. 2026-04-16 16:14:42 -03:00
fiatjaf 31473172a9 khatru: byAuthor and byKind as xsync maps. 2026-04-16 07:20:48 -03:00
fiatjaf d56bdba3ff khatru: WithServiceURL() subhandlers. 2026-04-15 21:19:03 -03:00
fiatjaf 7dc553f71b eventstore/bleve: when there is only one language we skip using the detector. 2026-04-14 21:38:43 -03:00
fiatjaf fbd4dddba3 eventstore/bleve: index some generic tags and references on all events. 2026-04-14 20:40:28 -03:00
fiatjaf c11e94a04b khatru: ReplaceEvent hook doesn't need the previous events. 2026-04-14 19:58:17 -03:00
fiatjaf 029f4eb0d8 pool: AddToPenaltyBox() manually. 2026-04-11 22:26:23 -03:00
fiatjaf cf734a3ac7 sdk: make a test compile without khatru's Start() 2026-04-11 22:26:05 -03:00
fiatjaf d92a0cde16 get rid of PoolOptions, just set fields on Pool directly. 2026-04-11 20:31:05 -03:00
fiatjaf 5944a3ead6 bleve is not a full eventstore.Store. 2026-04-11 20:27:12 -03:00
fiatjaf 3e35681cb9 Revert "relay: check for subscription limits and error."
This reverts commit 637412fd38.
2026-04-11 19:26:00 -03:00
fiatjaf 8515153df2 Revert "pool: open new connections whenever a subscription limit is reached, reuse multiple simultaneous relay connections."
This reverts commit 9bf9816c15.
2026-04-11 19:25:39 -03:00
fiatjaf 98fa53464e pool: proper filter scope in subMany() 2026-04-11 19:24:08 -03:00
fiatjaf 29cdd48fcb eventstore: adapt test to ReplaceEvent() signature change. 2026-04-11 15:36:09 -03:00
fiatjaf 181de14642 lmdb: two small fixes, and stop doing WRITEMAP so it stops crashing. 2026-04-11 15:24:37 -03:00
fiatjaf 1794f0690f bleve: open timeout option. 2026-04-11 02:03:58 -03:00
fiatjaf 12af4717d4 pool: penalty box as a xsync map because mutexes are too hard. 2026-04-11 01:55:18 -03:00
fiatjaf b989b66bb7 change ReplaceEvent() interface to return a list of the events deleted. 2026-04-10 11:56:08 -03:00
fiatjaf 4261bc88f8 eventstore/bleve: replace with implementation inspired from pyramid. 2026-04-10 11:25:42 -03:00
fiatjaf a8205a3790 add relay.primal.net as another fallback big relay for outbox. 2026-04-10 10:00:03 -03:00
fiatjaf 0152341144 eventstore/lmdb: remove unused lastIdx code. 2026-04-08 21:45:10 -03:00
fiatjaf 9bf9816c15 pool: open new connections whenever a subscription limit is reached, reuse multiple simultaneous relay connections. 2026-04-07 18:13:05 -03:00
fiatjaf 82f2fbdb99 sdk: a bunch of more list loaders. 2026-04-07 17:39:21 -03:00
fiatjaf d5b54a1c91 negentropy: fix varint encoding. 2026-04-07 17:20:10 -03:00
fiatjaf 637412fd38 relay: check for subscription limits and error. 2026-04-07 12:09:00 -03:00
fiatjaf 9b881801d8 khatru: get rid of broken unused get-started.go helpers. 2026-04-04 09:20:03 -03:00
fiatjaf 371cecdb84 guard against nil connections on write to protect against concurrent listener removals. 2026-04-03 12:30:06 -03:00
fiatjaf 2735abe060 khatru: listener needed a xsync.Map instead of a map, because of concurrent access. 2026-04-03 08:27:00 -03:00
fiatjaf b9a3e78752 mmm: print free ranges count. 2026-04-03 08:23:15 -03:00
fiatjaf ff03090610 schema: update url. 2026-04-03 08:23:04 -03:00
fiatjaf 72a5be58d7 fix AppendUnique. 2026-04-02 03:29:56 -03:00
fiatjaf 2c30300756 mmm: use flock instead of file presence for locking the database. 2026-04-02 03:29:56 -03:00
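A minimal sketch of the flock approach, assuming golang.org/x/sys/unix on a POSIX system (mmm's actual code may differ): the kernel drops the lock when the process exits, so a stale lock file can never wedge the database.
package mmmsketch

import "golang.org/x/sys/unix"

// lockDB takes an exclusive advisory lock on the database directory.
func lockDB(path string) (fd int, err error) {
	fd, err = unix.Open(path, unix.O_RDONLY, 0)
	if err != nil {
		return 0, err
	}
	// LOCK_NB: fail immediately if another process already holds the lock.
	if err := unix.Flock(fd, unix.LOCK_EX|unix.LOCK_NB); err != nil {
		unix.Close(fd)
		return 0, err
	}
	return fd, nil // released automatically when the process dies
}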
fiatjaf d1fdc262f2 mmm: reduce default mmap size. 2026-04-02 03:29:56 -03:00
fiatjaf 117a304f68 khatru: relay stats. 2026-04-02 03:29:56 -03:00
fiatjaf ac2d4579f1 khatru: get rid of subrelays + segregated indexed listeners. 2026-04-02 03:29:56 -03:00
fiatjaf 56610a32e6 constant ints must be casted so they work with gomobile. 2026-03-29 09:15:33 -03:00
fiatjaf d4940c7858 eventstore/mmm: use sync.Pool for tempResults. 2026-03-28 11:22:39 -03:00
fiatjaf 172e7890b9 khatru: use a channelmutex so we can fail to lock on addListener() if there's a disconnect. 2026-03-28 10:47:33 -03:00
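A hedged sketch of what a "channelmutex" enables here (illustrative names, not khatru's actual type): a mutex built on a 1-buffered channel, so a lock attempt can be abandoned if a disconnect fires first.
type chanMutex chan struct{}

func newChanMutex() chanMutex { return make(chanMutex, 1) }

func (m chanMutex) Lock()   { m <- struct{}{} }
func (m chanMutex) Unlock() { <-m }

// lockOrAbort acquires the mutex unless abort (e.g. a disconnect
// notification) fires first, in which case it reports failure.
func (m chanMutex) lockOrAbort(abort <-chan struct{}) bool {
	select {
	case m <- struct{}{}:
		return true
	case <-abort:
		return false
	}
}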
fiatjaf 3acfbbca0a nip34: grasp url helper. 2026-03-26 17:36:01 -03:00
fiatjaf b5974cfa45 add nip34/git-natural-api, using the same approach as https://jsr.io/@fiatjaf/git-natural-api. 2026-03-25 15:58:31 -03:00
fiatjaf c74ac74a0e use sync.Once to prevent duplicated AUTH attempts on the same relays. 2026-03-25 15:28:17 -03:00
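Sketch of the sync.Once pattern named in this commit, with hypothetical names: however many goroutines race to authenticate against a relay, the AUTH attempt runs at most once.
package poolsketch

import "sync"

type relayState struct {
	authOnce sync.Once
}

// authenticate collapses concurrent callers into a single AUTH attempt.
func (r *relayState) authenticate(sendAuth func()) {
	r.authOnce.Do(sendAuth)
}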
fiatjaf ec6f3f8a41 .Count() to handle CLOSED messages and support AUTH like .Subscribe(). 2026-03-25 11:46:36 -03:00
fiatjaf d43fbbf02d eventstore: fix lmdb and mmm ptag-kind queries.
the prefix was missing the 2 bytes of the kind when preparing the query.

we also simplify the query planner logic a little bit and eliminate some useless fields, so everything is clearer.
2026-03-23 00:23:38 -03:00
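A minimal sketch of the fixed key layout, derived from the query-planner changes further down: ptag-kind index keys are [8-byte pubkey prefix][2-byte kind][4-byte created_at], so a query prefix that omits the two kind bytes scans the wrong range (the function name here is illustrative).
package lmdbsketch

import "encoding/binary"

// ptagKindPrefix builds the 10-byte prefix used to position a cursor
// on the ptag-kind index.
func ptagKindPrefix(pubkey8 [8]byte, kind uint16) []byte {
	prefix := make([]byte, 8+2)
	copy(prefix[0:8], pubkey8[:])                  // first 8 bytes of the "p" pubkey
	binary.BigEndian.PutUint16(prefix[8:10], kind) // the 2 kind bytes that were missing
	return prefix
}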
fiatjaf 6a686c31af eventstore: add test for ptag filter with until clause.
following nostr:nevent1qvzqqqqqqypzq3svyhng9ld8sv44950j957j9vchdktj7cxumsep9mvvjthc2pjuqy0hwumn8ghj7urewfsk66ty9enxjct5dfskvtnrdakj76twvfhhsqgawaehxw309aehqct5d9sj6ctjvdskucfwvdhk6tmfde3x77qqyqspzxnz3g0sway64qpjkszxx0qv666sce30dymn5mw467j709zejjvtka2
2026-03-23 00:23:23 -03:00
fiatjaf a6fdcd8b30 blossom: display body when upload fails and no reason is given. 2026-03-22 12:49:59 -03:00
fiatjaf e675f04bd2 maybe this is not necessary? prevent WriteWithError from getting stuck. 2026-03-20 20:27:24 -03:00
fiatjaf 0630bbe4e9 fix another dead relay issue (relays dying but subscriptions living forever and not being reconnected). 2026-03-20 20:26:59 -03:00
fiatjaf 55c5194bdf schema: fix tests. 2026-03-20 20:26:12 -03:00
fiatjaf f3f5c3982d nip29: fix printing. 2026-03-20 20:10:07 -03:00
fiatjaf 1520264394 nip54: update d-tag normalization rules. 2026-03-18 19:12:40 -03:00
fiatjaf 2cec1c9434 eliminate readQueue stuck channel on relay close. 2026-03-14 05:53:16 -03:00
fiatjaf 6cbe984e16 eliminate closedMutex and closeNotify because they are useless (apparently). 2026-03-14 05:31:48 -03:00
fiatjaf 5a0b18e65a add buffers to read and write queues. 2026-03-12 17:14:18 -03:00
fiatjaf bb4093d834 nip29: supported_kinds and transition edit-metadata to be a PUT, not a PATCH. 2026-03-12 11:00:01 -03:00
fiatjaf 3bd059d1f9 nip29: livekit group live participants. 2026-03-11 16:06:36 -03:00
fiatjaf 681bd55e55 replace "no-text" with "supported_kinds". 2026-03-11 10:49:41 -03:00
fiatjaf 4e490879b5 khatru/policies: accept deletions even when they're not protected. 2026-03-09 22:37:26 -03:00
fiatjaf 4348c64b14 r.writeQueue doesn't have to be closed. 2026-03-08 22:58:36 -03:00
fiatjaf 2c0d9712e3 CompareRelayEvent() helpers. 2026-03-08 22:57:38 -03:00
fiatjaf 4719c0bc9f khatru/policies: PreventNormalDuplicates() to consider "a" reactions too. 2026-03-06 01:04:46 -03:00
fiatjaf 163e59e1f1 nip29: fix "livekit" tag parsing. 2026-03-05 22:46:20 -03:00
fiatjaf 21ce0046c0 nip29: bring back negative tags in edit-metadata. 2026-03-04 23:24:48 -03:00
fiatjaf 1d14e6bebe relay: fix inverted r.writeQueue check. 2026-03-04 00:01:04 -03:00
fiatjaf 23d525f067 another last guard against sending on closed channel. 2026-03-02 11:59:12 -03:00
fiatjaf 4dab261bdf close relays when sending a CLOSE message fails and ensure closed relays aren't used. 2026-03-01 09:44:53 -03:00
fiatjaf 44c429d6b1 ensure we fail subscriptions to closed relays. 2026-03-01 09:26:00 -03:00
fiatjaf 4b5c51ffc0 refactor unsub to be dependent on the context only and always. 2026-03-01 09:19:25 -03:00
fiatjaf 5de9501556 fixes. 2026-03-01 09:18:55 -03:00
fiatjaf 8ba05114cd remove relay from pool once it's closed or disconnected. 2026-02-28 14:26:32 -03:00
fiatjaf 1df85217d9 merge connection into relay, do all the closing logic on context cancelation and have closeMutex be a channelmutex. 2026-02-28 14:19:57 -03:00
fiatjaf 195cb944e2 fix potential subscription leaking bug with MaxWaitForEOSE never being effective. 2026-02-27 06:47:27 -03:00
fiatjaf c31b92707b trying to prevent leaking subscriptions. 2026-02-26 23:01:18 -03:00
fiatjaf 00ffe16cb7 nip29: add "no-text" and "livekit" tags. 2026-02-26 07:17:58 -03:00
fiatjaf 4d1b6c1df0 potential fix on some concurrency pool issues. 2026-02-23 15:22:21 -03:00
fiatjaf 62d15178ec sdk: update default relay lists. 2026-02-22 18:34:52 -03:00
fiatjaf 32dd39da81 sdk: fix default Publisher to work with any store. 2026-02-22 18:02:22 -03:00
fiatjaf 7aa127a8c3 use if ctx.Err() instead of select {}. 2026-02-22 17:45:36 -03:00
fiatjaf 55cc52876a khatru/policies: support "a" in kind:1163 on PreventNormalDuplicates() 2026-02-18 15:26:10 -03:00
fiatjaf 137c09369a khatru/policies: fix tagName usage in PreventNormalDuplicates() 2026-02-18 10:43:33 -03:00
fiatjaf d445ba9919 mmm: free ranges tracking improved with b.freeRangesLarge and b.freeRangesAll
one is unsorted and fast and we only care about it when picking a new free range.
the other is sorted and used when merging a newly freed range with existing free ranges.
both are computed from the events id index at the beginning, then tracked manually on each addition or deletion.
this change uncovered some errors so we fixed them and added some more fuzz test invariant checking.
code is simplified a little bit.
there was another thing I forgot.
2026-02-17 18:33:59 -03:00
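A hedged sketch of the two-list scheme this message describes; type and field names are illustrative, not mmm's actual code.
type freeRange struct{ start, size uint64 }

type freeRanges struct {
	large []freeRange // unsorted and fast: only scanned when picking a range for a new event
	all   []freeRange // sorted by start: used to merge a newly freed range with its neighbors
}

// pick takes the first range big enough for size, shrinking it in place.
func (f *freeRanges) pick(size uint64) (freeRange, bool) {
	for i, r := range f.large {
		if r.size >= size {
			f.large[i] = freeRange{start: r.start + size, size: r.size - size}
			return freeRange{start: r.start, size: size}, true
		}
	}
	return freeRange{}, false
}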
fiatjaf d30c1bff46 khatru/policies: PreventNormalDuplicates() 2026-02-17 18:33:59 -03:00
fiatjaf 65ef1c50a7 khatru: OnEventDeleted hook. 2026-02-17 18:33:59 -03:00
fiatjaf 7a4b71b39b blossom: hardcode some more common problematic extension types. 2026-02-11 11:41:28 -03:00
fiatjaf 3f52d10421 nip77: fix one possible infinite loop with channels. 2026-02-09 18:38:39 -03:00
fiatjaf a98ac0d050 khatru: only kill connection once. 2026-02-08 10:59:51 -03:00
fiatjaf 28bef1c990 khatru: stop iterating through event results if ws fails. 2026-02-08 10:59:30 -03:00
fiatjaf beb8a72491 nip60: don't lose tokens when bolt11 payment fails. 2026-02-03 19:27:06 -03:00
131 changed files with 6972 additions and 2155 deletions
-158
@@ -1,158 +0,0 @@
package nostr
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"net/textproto"
"sync/atomic"
"time"
ws "github.com/coder/websocket"
)
var ErrDisconnected = errors.New("<disconnected>")
type writeRequest struct {
msg []byte
answer chan error
}
func (r *Relay) newConnection(ctx context.Context, httpClient *http.Client) error {
debugLogf("{%s} connecting!\n", r.URL)
dialCtx := ctx
if _, ok := dialCtx.Deadline(); !ok {
// if no timeout is set, force it to 7 seconds
dialCtx, _ = context.WithTimeoutCause(ctx, 7*time.Second, errors.New("connection took too long"))
}
dialOpts := &ws.DialOptions{
HTTPHeader: http.Header{
textproto.CanonicalMIMEHeaderKey("User-Agent"): {"fiatjaf.com/nostr"},
},
CompressionMode: ws.CompressionContextTakeover,
HTTPClient: httpClient,
}
for k, v := range r.requestHeader {
dialOpts.HTTPHeader[k] = v
}
c, _, err := ws.Dial(dialCtx, r.URL, dialOpts)
if err != nil {
return err
}
c.SetReadLimit(2 << 24) // 33MB
// this will tell if the connection is closed
// ping every 29 seconds
ticker := time.NewTicker(29 * time.Second)
// main websocket loop
readQueue := make(chan string)
r.conn = c
r.writeQueue = make(chan writeRequest)
r.closed = &atomic.Bool{}
r.closedNotify = make(chan struct{})
go func() {
pingAttempt := 0
for {
select {
case <-ctx.Done():
r.closeConnection(ws.StatusNormalClosure, "")
debugLogf("{%s} closing!, context done: '%s'\n", r.URL, context.Cause(ctx))
return
case <-r.closedNotify:
return
case <-ticker.C:
debugLogf("{%s} pinging\n", r.URL)
ctx, cancel := context.WithTimeoutCause(ctx, time.Millisecond*800, errors.New("ping took too long"))
err := c.Ping(ctx)
cancel()
if err != nil {
pingAttempt++
debugLogf("{%s} error writing ping (attempt %d): %v", r.URL, pingAttempt, err)
if pingAttempt >= 3 {
debugLogf("{%s} error writing ping after multiple attempts; closing websocket", r.URL)
err = r.Close() // this should trigger a context cancelation
if err != nil {
debugLogf("{%s} failed to close relay: %v", r.URL, err)
}
}
continue
}
// ping was OK
debugLogf("{%s} ping OK", r.URL)
pingAttempt = 0
case wr := <-r.writeQueue:
debugLogf("{%s} sending '%v'\n", r.URL, string(wr.msg))
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("write took too long"))
err := c.Write(ctx, ws.MessageText, wr.msg)
cancel()
if err != nil {
debugLogf("{%s} closing!, write failed: '%s'\n", r.URL, err)
r.closeConnection(ws.StatusAbnormalClosure, "write failed")
if wr.answer != nil {
wr.answer <- err
}
return
}
if wr.answer != nil {
close(wr.answer)
}
case msg := <-readQueue:
debugLogf("{%s} received %v\n", r.URL, msg)
r.handleMessage(msg)
}
}
}()
// read loop -- loops back to the main loop
go func() {
buf := new(bytes.Buffer)
for {
buf.Reset()
_, reader, err := c.Reader(ctx)
if err != nil {
debugLogf("{%s} closing!, reader failure: '%s'\n", r.URL, err)
r.closeConnection(ws.StatusAbnormalClosure, "failed to get reader")
return
}
if _, err := io.Copy(buf, reader); err != nil {
debugLogf("{%s} closing!, read failure: '%s'\n", r.URL, err)
r.closeConnection(ws.StatusAbnormalClosure, "failed to read")
return
}
readQueue <- string(buf.Bytes())
}
}()
return nil
}
func (r *Relay) closeConnection(code ws.StatusCode, reason string) {
wasClosed := r.closed.Swap(true)
if !wasClosed {
r.conn.Close(code, reason)
r.connectionContextCancel(fmt.Errorf("doClose(): %s", reason))
r.closeMutex.Lock()
close(r.closedNotify)
close(r.writeQueue)
r.conn = nil
r.closeMutex.Unlock()
}
}
+1 -1
@@ -17,7 +17,7 @@ func TestEOSEMadness(t *testing.T) {
}, SubscriptionOptions{})
assert.NoError(t, err)
-timeout := time.After(3 * time.Second)
+timeout := time.After(2 * time.Second)
n := 0
e := 0
+213 -13
@@ -2,7 +2,9 @@ package nostr
import (
"crypto/sha256"
"hash"
"strconv"
"unsafe"
"github.com/mailru/easyjson"
"github.com/templexxx/xhex"
@@ -26,10 +28,17 @@ func (evt Event) String() string {
// GetID serializes and returns the event ID as a string.
func (evt Event) GetID() ID {
-return sha256.Sum256(evt.Serialize())
+var id ID
+evt.serializedHash(&id)
+return id
}
-// CheckID checks if the implied ID matches the given ID more efficiently.
+// SetID calculates and sets the id to the event in a single operation.
+func (evt *Event) SetID() {
+evt.serializedHash(&evt.ID)
+}
+// CheckID checks if the implied ID matches the currently assigned ID.
func (evt Event) CheckID() bool {
return evt.GetID() == evt.ID
}
@@ -38,17 +47,56 @@ func (evt Event) CheckID() bool {
func (evt Event) Serialize() []byte {
// the serialization process is just putting everything into a JSON array
// so the order is kept. See NIP-01
-dst := make([]byte, 4+64, 100+len(evt.Content)+len(evt.Tags)*80)
+dst := make([]byte, 0, 100+len(evt.Content)+len(evt.Tags)*80)
+return evt.appendSerialized(dst)
}
-// the header portion is easy to serialize
-// [0,"pubkey",created_at,kind,[
-copy(dst, `[0,"`)
-xhex.Encode(dst[4:4+64], evt.PubKey[:]) // there will always be such capacity
var escTable [256]bool
// pre-built escape sequences; index by the offending byte.
var escSeq [256][2]byte
// pre-built []byte slices for hash.Write calls (no per-call allocation).
var escSlice [256][]byte
var (
jsonQuote = []byte{'"'}
serializedStart = []byte(`[0,"`)
serializedPubkeyEnd = []byte(`",`)
serializedTagsEnd = []byte("],")
serializedTagStart = []byte{'['}
serializedTagEnd = []byte{']'}
serializedComma = []byte{','}
serializedEnd = []byte{']'}
)
func init() {
for _, b := range []byte{'"', '\\', '\n', '\r', '\t'} {
escTable[b] = true
}
escSeq['"'] = [2]byte{'\\', '"'}
escSeq['\\'] = [2]byte{'\\', '\\'}
escSeq['\n'] = [2]byte{'\\', 'n'}
escSeq['\r'] = [2]byte{'\\', 'r'}
escSeq['\t'] = [2]byte{'\\', 't'}
for b, seq := range escSeq {
if escTable[b] {
escSlice[b] = seq[:]
}
}
}
func (evt Event) appendSerialized(dst []byte) []byte {
start := len(dst)
dst = append(dst, `[0,"`...)
dst = append(dst, make([]byte, 64)...)
xhex.Encode(dst[start+4:start+4+64], evt.PubKey[:])
dst = append(dst, `",`...)
-dst = append(dst, strconv.FormatInt(int64(evt.CreatedAt), 10)...)
-dst = append(dst, `,`...)
-dst = append(dst, strconv.FormatUint(uint64(evt.Kind), 10)...)
-dst = append(dst, `,`...)
+dst = strconv.AppendInt(dst, int64(evt.CreatedAt), 10)
+dst = append(dst, ',')
+dst = strconv.AppendUint(dst, uint64(evt.Kind), 10)
+dst = append(dst, ',')
// tags
dst = append(dst, '[')
@@ -62,15 +110,167 @@ func (evt Event) Serialize() []byte {
if i > 0 {
dst = append(dst, ',')
}
-dst = escapeString(dst, s)
+dst = appendJSONString(dst, s)
}
dst = append(dst, ']')
}
dst = append(dst, "],"...)
// content needs to be escaped in general as it is user generated.
-dst = escapeString(dst, evt.Content)
+dst = appendJSONString(dst, evt.Content)
dst = append(dst, ']')
return dst
}
func (evt Event) serializedHash(dst *ID) {
h := sha256.New()
h.Write(serializedStart)
var pubkeyHex [64]byte
xhex.Encode(pubkeyHex[:], evt.PubKey[:])
h.Write(pubkeyHex[:])
h.Write(serializedPubkeyEnd)
var numBuf [20]byte
b := strconv.AppendInt(numBuf[:0], int64(evt.CreatedAt), 10)
h.Write(b)
h.Write(serializedComma)
b = strconv.AppendUint(numBuf[:0], uint64(evt.Kind), 10)
h.Write(b)
h.Write(serializedComma)
h.Write(serializedTagStart)
for i, tag := range evt.Tags {
if i > 0 {
h.Write(serializedComma)
}
h.Write(serializedTagStart)
for j, s := range tag {
if j > 0 {
h.Write(serializedComma)
}
writeJSONString(h, s)
}
h.Write(serializedTagEnd)
}
h.Write(serializedTagsEnd)
writeJSONString(h, evt.Content)
h.Write(serializedEnd)
h.Sum((*dst)[:0])
}
// ── SWAR helper ──────────────────────────────────────────────────────────────
// hasSpecial reports whether any byte in w is one of: \t 0x09, \n 0x0A,
// \r 0x0D, " 0x22, \ 0x5C. Uses the classic "hasvalue" bit-trick — no branches, no
// memory, pure ALU. Works regardless of endianness because we only care
// whether a match exists, not where.
//
//go:nosplit
func hasSpecial(w uint64) bool {
match := func(w, v uint64) uint64 {
x := w ^ (0x0101010101010101 * v)
return (x - 0x0101010101010101) & ^x & 0x8080808080808080
}
return match(w, 0x09)|match(w, 0x0A)|match(w, 0x0D)|match(w, 0x22)|match(w, 0x5C) != 0
}
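// worked example of the trick above, for a single candidate value v:
//   x := w ^ (0x0101010101010101 * v)  // bytes of w equal to v become 0x00
//   x - 0x0101010101010101             // a 0x00 byte wraps to 0xFF, setting its bit 7
//   ... & ^x                           // discard bytes whose bit 7 was already set in x
//   ... & 0x8080808080808080           // keep only the per-byte high bits
// so the result is non-zero iff at least one byte of w equals v, e.g.
// w = 0x0000002209000000 matches both v = 0x22 ('"') and v = 0x09 ('\t').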
func appendJSONString(dst []byte, s string) []byte {
dst = append(dst, '"')
n := len(s)
if n == 0 {
return append(dst, '"')
}
base := uintptr(unsafe.Pointer(unsafe.StringData(s)))
start, i := 0, 0
// consume 8 bytes at a time;
// if the whole word is clean, advance without touching dst at all;
// but when a word is dirty, fall back to the byte loop only for that 8-byte window
for i+8 <= n {
w := *(*uint64)(unsafe.Pointer(base + uintptr(i)))
if hasSpecial(w) {
for end := i + 8; i < end; i++ {
if escTable[s[i]] {
// append everything since the start or the last time we did this up to here
dst = append(dst, s[start:i]...)
// append this special sequence
seq := escSeq[s[i]]
dst = append(dst, seq[0], seq[1])
// set this as a checkpoint
start = i + 1
}
}
} else {
i += 8
}
}
// scalar tail for the remaining <8 bytes (same logic used for the hasSpecial branch above)
for ; i < n; i++ {
if escTable[s[i]] {
dst = append(dst, s[start:i]...)
seq := escSeq[s[i]]
dst = append(dst, seq[0], seq[1])
start = i + 1
}
}
// add the remaining chunk (in a string without any specials this will add everything at once)
dst = append(dst, s[start:]...)
return append(dst, '"')
}
func writeJSONString(h hash.Hash, s string) {
h.Write(jsonQuote)
n := len(s)
if n == 0 {
h.Write(jsonQuote)
return
}
base := uintptr(unsafe.Pointer(unsafe.StringData(s)))
start, i := 0, 0
for i+8 <= n {
w := *(*uint64)(unsafe.Pointer(base + uintptr(i)))
// apply the same logic as in appendJSONString()
if hasSpecial(w) {
for end := i + 8; i < end; i++ {
if escTable[s[i]] {
if i > start {
h.Write(unsafe.Slice(unsafe.StringData(s[start:i]), i-start))
}
h.Write(escSlice[s[i]])
start = i + 1
}
}
} else {
i += 8
}
}
for ; i < n; i++ {
if escTable[s[i]] {
if i > start {
h.Write(unsafe.Slice(unsafe.StringData(s[start:i]), i-start))
}
h.Write(escSlice[s[i]])
start = i + 1
}
}
if start < n {
h.Write(unsafe.Slice(unsafe.StringData(s[start:]), len(s)-start))
}
h.Write(jsonQuote)
}
+48 -18
@@ -1,8 +1,12 @@
package nostr
import (
"bufio"
"bytes"
"fmt"
"io"
"math/rand/v2"
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -102,23 +106,49 @@ func TestIDCheck(t *testing.T) {
}
}
-func BenchmarkIDCheck(b *testing.B) {
-evt := Event{
-CreatedAt: Timestamp(rand.Int64N(9999999)),
-Content: fmt.Sprintf("hello"),
-Tags: Tags{},
-}
-evt.Sign(Generate())
-b.Run("naïve", func(b *testing.B) {
-for b.Loop() {
-_ = evt.GetID() == evt.ID
-}
-})
-b.Run("big brain", func(b *testing.B) {
-for b.Loop() {
-_ = evt.CheckID()
-}
-})
+func BenchmarkEventVerifySignatureJSONL(b *testing.B) {
+events := loadBenchmarkEvents(b)
+b.ReportAllocs()
+b.ResetTimer()
+for i := 0; i < b.N; i++ {
+for _, evt := range events {
+if !evt.VerifySignature() {
+b.Fatal("signature verification failed")
+}
+}
+}
}
func loadBenchmarkEvents(b *testing.B) []Event {
b.Helper()
f, err := os.Open("testdata/events.jsonl")
require.NoError(b, err)
b.Cleanup(func() { _ = f.Close() })
r := bufio.NewReader(f)
events := make([]Event, 0, 1024)
for {
line, err := r.ReadBytes('\n')
if err != nil && err != io.EOF {
require.NoError(b, err)
}
line = bytes.TrimSpace(line)
if len(line) != 0 {
var evt Event
require.NoError(b, json.Unmarshal(line, &evt))
require.True(b, evt.VerifySignature(), "fixture contains invalid signature")
events = append(events, evt)
}
if err == io.EOF {
break
}
}
require.NotEmpty(b, events)
return events
}
-2
@@ -29,8 +29,6 @@ type Store interface {
}
```
[![Go Reference](https://pkg.go.dev/badge/fiatjaf.com/nostr/eventstore.svg)](https://pkg.go.dev/fiatjaf.com/nostr/eventstore) [![Run Tests](https://fiatjaf.com/nostr/eventstore/actions/workflows/test.yml/badge.svg)](https://fiatjaf.com/nostr/eventstore/actions/workflows/test.yml)
## Available Implementations
- **bleve**: Full-text search and indexing using the Bleve search library
+4 -1
@@ -7,6 +7,7 @@ import (
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/lmdb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBleveFlow(t *testing.T) {
@@ -21,7 +22,9 @@ func TestBleveFlow(t *testing.T) {
Path: "/tmp/blevetest-bleve",
RawEventStore: bb,
}
-bl.Init()
+err := bl.Init()
+require.NoError(t, err, "init")
defer bl.Close()
willDelete := make([]nostr.Event, 0, 3)
-9
@@ -1,9 +0,0 @@
package bleve
import (
"fiatjaf.com/nostr"
)
func (b *BleveBackend) DeleteEvent(id nostr.ID) error {
return b.index.Delete(id.Hex())
}
-9
@@ -1,9 +0,0 @@
package bleve
const (
idField = "i"
contentField = "c"
kindField = "k"
createdAtField = "a"
pubkeyField = "p"
)
+104
@@ -0,0 +1,104 @@
package bleve
import (
"strings"
"unicode"
)
// lexer tokenizes the input string
type Lexer struct {
input string
pos int
peekedQueue []Token
}
func NewLexer(input string) *Lexer {
return &Lexer{input: input, pos: 0}
}
func (l *Lexer) peek() rune {
if l.pos >= len(l.input) {
return 0
}
return rune(l.input[l.pos])
}
func (l *Lexer) advance() rune {
if l.pos >= len(l.input) {
return 0
}
ch := rune(l.input[l.pos])
l.pos++
return ch
}
func (l *Lexer) skipWhitespace() {
for l.peek() != 0 && unicode.IsSpace(l.peek()) {
l.advance()
}
}
func (l *Lexer) readWord() string {
start := l.pos
// read regular word (alphanumeric, hyphens, underscores)
for l.peek() != 0 && !unicode.IsSpace(l.peek()) &&
l.peek() != '(' && l.peek() != ')' && l.peek() != '"' {
l.advance()
}
return l.input[start:l.pos]
}
func (l *Lexer) PeekToken() Token {
next := l.NextToken()
l.peekedQueue = append(l.peekedQueue, next)
return next
}
func (l *Lexer) ReturnToken(tok Token) {
l.peekedQueue = append(l.peekedQueue, tok)
}
func (l *Lexer) NextToken() (tok Token) {
if len(l.peekedQueue) > 0 {
next := l.peekedQueue[len(l.peekedQueue)-1]
l.peekedQueue = l.peekedQueue[0 : len(l.peekedQueue)-1]
return next
}
l.skipWhitespace()
if l.pos >= len(l.input) {
return Token{Type: TokenEOF}
}
ch := l.peek()
switch ch {
case '(':
l.advance()
return Token{Type: TokenLParen, Value: "("}
case ')':
l.advance()
return Token{Type: TokenRParen, Value: ")"}
case '"':
l.advance()
return Token{Type: TokenQuote, Value: "\""}
default:
word := l.readWord()
upperWord := strings.ToUpper(word)
switch upperWord {
case "OR", "||":
return Token{Type: TokenOR, Value: word}
case "AND", "&&":
return Token{Type: TokenAND, Value: word}
case "NOT", "!":
return Token{Type: TokenNOT, Value: word}
default:
return Token{Type: TokenWord, Value: word}
}
}
}
+435 -15
@@ -1,34 +1,101 @@
package bleve
import (
"encoding/json"
"errors"
"fmt"
"iter"
"slices"
"strconv"
"strings"
"sync"
"time"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/nip27"
"fiatjaf.com/nostr/nip73"
"fiatjaf.com/nostr/sdk"
bleve "github.com/blevesearch/bleve/v2"
_ "github.com/blevesearch/bleve/v2/analysis/analyzer/simple"
_ "github.com/blevesearch/bleve/v2/analysis/lang/ar"
_ "github.com/blevesearch/bleve/v2/analysis/lang/cjk"
_ "github.com/blevesearch/bleve/v2/analysis/lang/da"
_ "github.com/blevesearch/bleve/v2/analysis/lang/de"
_ "github.com/blevesearch/bleve/v2/analysis/lang/en"
_ "github.com/blevesearch/bleve/v2/analysis/lang/es"
_ "github.com/blevesearch/bleve/v2/analysis/lang/fa"
_ "github.com/blevesearch/bleve/v2/analysis/lang/fi"
_ "github.com/blevesearch/bleve/v2/analysis/lang/fr"
_ "github.com/blevesearch/bleve/v2/analysis/lang/gl"
_ "github.com/blevesearch/bleve/v2/analysis/lang/hi"
_ "github.com/blevesearch/bleve/v2/analysis/lang/hr"
_ "github.com/blevesearch/bleve/v2/analysis/lang/hu"
_ "github.com/blevesearch/bleve/v2/analysis/lang/in"
_ "github.com/blevesearch/bleve/v2/analysis/lang/it"
_ "github.com/blevesearch/bleve/v2/analysis/lang/nl"
_ "github.com/blevesearch/bleve/v2/analysis/lang/no"
_ "github.com/blevesearch/bleve/v2/analysis/lang/pl"
_ "github.com/blevesearch/bleve/v2/analysis/lang/pt"
_ "github.com/blevesearch/bleve/v2/analysis/lang/ro"
_ "github.com/blevesearch/bleve/v2/analysis/lang/ru"
_ "github.com/blevesearch/bleve/v2/analysis/lang/sv"
_ "github.com/blevesearch/bleve/v2/analysis/lang/tr"
bleveMapping "github.com/blevesearch/bleve/v2/mapping"
bleveQuery "github.com/blevesearch/bleve/v2/search/query"
"github.com/pemistahl/lingua-go"
)
var _ eventstore.Store = (*BleveBackend)(nil)
const (
labelContentField = "c"
labelKindField = "k"
labelCreatedAtField = "a"
labelAuthorField = "p"
labelReferencesField = "r"
labelExtrasField = "x"
)
var SupportedLanguages = []lingua.Language{
// each of these translates to a specific bleve analyzer
// except for japanese-korean-chinese that all use the same "cjk" analyzer
lingua.Arabic,
lingua.Chinese,
lingua.Croatian,
lingua.Danish,
lingua.Dutch,
lingua.English,
lingua.Finnish,
lingua.French,
lingua.German,
lingua.Hindi,
lingua.Hungarian,
lingua.Italian,
lingua.Japanese,
lingua.Korean,
lingua.Persian,
lingua.Polish,
lingua.Portuguese,
lingua.Romanian,
lingua.Russian,
lingua.Spanish,
lingua.Swedish,
lingua.Turkish,
}
type BleveBackend struct {
sync.Mutex
-// Path is where the index will be saved
-// RawEventStore is where we'll fetch the raw events from
-// bleve will only store ids, so the actual events must be somewhere else
Path string
RawEventStore eventstore.Store
+ReadOnly bool
+OpenTimeout time.Duration
+IndexableKinds []nostr.Kind
+Languages []lingua.Language
+languageCodes []string
index bleve.Index
+detector lingua.LanguageDetector
}
-func (b *BleveBackend) Close() {
-if b.index != nil {
-b.index.Close()
-}
-}
func (b *BleveBackend) Init() error {
@@ -38,12 +105,94 @@ func (b *BleveBackend) Init() error {
if b.RawEventStore == nil {
return fmt.Errorf("missing RawEventStore")
}
if len(b.Languages) == 0 {
return fmt.Errorf("missing Languages")
}
if len(b.IndexableKinds) == 0 {
b.IndexableKinds = []nostr.Kind{0, 1, 6, 11, 16, 20, 21, 22, 24, 1111, 9802, 30023, 30818}
}
// try to open existing index
-index, err := bleve.Open(b.Path)
validLanguages := make([]lingua.Language, 0, len(b.Languages))
b.languageCodes = make([]string, 0, len(b.Languages))
for _, lang := range b.Languages {
var code string
switch lang {
case lingua.Chinese, lingua.Korean, lingua.Japanese:
code = "cjk"
default:
code = strings.ToLower(lang.IsoCode639_1().String())
}
if slices.Contains(b.languageCodes, code) {
continue
}
validLanguages = append(validLanguages, lang)
b.languageCodes = append(b.languageCodes, code)
}
b.Languages = validLanguages
opts := map[string]any{
"read_only": b.ReadOnly,
}
if b.OpenTimeout != 0 {
opts["bolt_timeout"] = b.OpenTimeout.String()
}
index, err := bleve.OpenUsing(b.Path, opts)
if err == bleve.ErrorIndexPathDoesNotExist {
// create new index with default mapping
mapping := bleveMapping.NewIndexMapping()
mapping.DefaultMapping.Dynamic = false
doc := bleveMapping.NewDocumentStaticMapping()
for _, code := range b.languageCodes {
contentField := bleveMapping.NewTextFieldMapping()
contentField.Analyzer = code
contentField.Store = false
contentField.IncludeTermVectors = false
contentField.DocValues = false
contentField.IncludeInAll = false
doc.AddFieldMappingsAt(labelContentField+"_"+code, contentField)
}
extrasField := bleveMapping.NewTextFieldMapping()
extrasField.Analyzer = "simple"
extrasField.Store = false
extrasField.IncludeTermVectors = false
extrasField.DocValues = false
extrasField.IncludeInAll = false
doc.AddFieldMappingsAt(labelExtrasField, extrasField)
referencesField := bleveMapping.NewKeywordFieldMapping()
referencesField.DocValues = false
referencesField.Store = false
referencesField.IncludeTermVectors = false
referencesField.IncludeInAll = false
doc.AddFieldMappingsAt(labelReferencesField, referencesField)
authorField := bleveMapping.NewKeywordFieldMapping()
authorField.DocValues = false
authorField.Store = false
authorField.IncludeTermVectors = false
doc.AddFieldMappingsAt(labelAuthorField, authorField)
kindField := bleveMapping.NewKeywordFieldMapping()
kindField.DocValues = false
kindField.Store = false
kindField.IncludeTermVectors = false
kindField.IncludeInAll = false
doc.AddFieldMappingsAt(labelKindField, kindField)
timestampField := bleveMapping.NewDateTimeFieldMapping()
timestampField.DocValues = false
timestampField.Store = false
timestampField.IncludeTermVectors = false
timestampField.IncludeInAll = false
doc.AddFieldMappingsAt(labelCreatedAtField, timestampField)
mapping.AddDocumentMapping("_default", doc)
index, err = bleve.New(b.Path, mapping)
if err != nil {
return fmt.Errorf("error creating index: %w", err)
@@ -53,6 +202,126 @@ func (b *BleveBackend) Init() error {
}
b.index = index
if len(b.Languages) >= 2 {
b.detector = lingua.NewLanguageDetectorBuilder().
FromLanguages(b.Languages...).
Build()
}
return nil
}
func (b *BleveBackend) Close() {
if b != nil && b.index != nil {
b.index.Close()
}
}
func (b *BleveBackend) SaveEvent(event nostr.Event) error {
if slices.Contains(b.IndexableKinds, event.Kind) {
return b.indexEvent(event)
}
return nil
}
func (b *BleveBackend) DeleteEvent(id nostr.ID) error {
if b != nil && b.index != nil {
return b.index.Delete(id.Hex())
}
return nil
}
func (b *BleveBackend) indexEvent(evt nostr.Event) error {
docID := evt.ID
var references []string
var extras string
switch evt.Kind {
case 6, 16:
var innerEvt nostr.Event
if err := json.Unmarshal([]byte(evt.Content), &innerEvt); err != nil || !innerEvt.VerifySignature() {
return nil
}
evt = innerEvt
case 0:
var pm sdk.ProfileMetadata
if err := json.Unmarshal([]byte(evt.Content), &pm); err == nil {
evt.Content = pm.Name + "\n" + pm.DisplayName + "\n" + pm.About
references = append(references, pm.NIP05)
}
}
for _, tag := range evt.Tags {
if len(tag) < 2 {
continue
}
switch tag[0] {
case "comment", "name", "title", "about", "description":
evt.Content += "\n\n" + tag[1]
case "e":
if ptr, err := nostr.EventPointerFromTag(tag); err == nil {
references = append(references, ptr.AsTagReference())
}
case "a":
if ptr, err := nostr.EntityPointerFromTag(tag); err == nil {
references = append(references, ptr.AsTagReference())
}
case "r":
references = append(references, tag[1])
}
}
doc := map[string]any{
labelKindField: strconv.Itoa(int(evt.Kind)),
labelAuthorField: evt.PubKey.Hex()[56:],
labelCreatedAtField: evt.CreatedAt.Time(),
}
content := strings.Builder{}
content.Grow(len(evt.Content))
for block := range nip27.Parse(evt.Content) {
if block.Pointer == nil {
content.WriteString(strings.TrimSpace(block.Text))
} else {
references = append(references, block.Pointer.AsTagReference())
if ep, ok := block.Pointer.(nip73.ExternalPointer); ok {
extras += ep.Thing + " "
}
}
}
indexableContent := content.String()
var lang lingua.Language
if len(b.Languages) == 1 {
lang = b.Languages[0]
} else {
var ok bool
lang, ok = b.detector.DetectLanguageOf(indexableContent)
if !ok {
lang = lingua.English
}
}
var analyzerLangCode string
switch lang {
case lingua.Japanese, lingua.Chinese, lingua.Korean:
analyzerLangCode = "cjk"
default:
analyzerLangCode = strings.ToLower(lang.IsoCode639_1().String())
}
doc[labelContentField+"_"+analyzerLangCode] = indexableContent
doc[labelReferencesField] = references
doc[labelExtrasField] = extras
if err := b.index.Index(docID.Hex(), doc); err != nil {
return fmt.Errorf("failed to index '%s' document: %w", docID.Hex(), err)
}
return nil
}
@@ -64,3 +333,154 @@ func (b *BleveBackend) CountEvents(filter nostr.Filter) (uint32, error) {
return 0, errors.New("not supported")
}
func (b *BleveBackend) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
return func(yield func(nostr.Event) bool) {
if tlimit := filter.GetTheoreticalLimit(); tlimit == 0 {
return
} else if tlimit < maxLimit {
maxLimit = tlimit
}
filter.Search = strings.TrimSpace(filter.Search)
if len(filter.Search) < 2 {
return
}
and := make([]bleveQuery.Query, 0, 3)
searchC := strings.Builder{}
searchC.Grow(len(filter.Search))
for block := range nip27.Parse(filter.Search) {
if block.Pointer != nil {
genericRef := bleve.NewTermQuery(block.Pointer.AsTagReference())
genericRef.SetField(labelReferencesField)
genericRef.SetBoost(2)
var ref bleveQuery.Query = genericRef
if profile, ok := block.Pointer.(nostr.ProfilePointer); ok {
authorQuery := bleve.NewTermQuery(profile.PublicKey.Hex()[56:])
authorQuery.SetField(labelAuthorField)
authorQuery.SetBoost(2)
orRef := bleve.NewDisjunctionQuery()
orRef.AddQuery(genericRef)
orRef.AddQuery(authorQuery)
ref = orRef
} else if addr, ok := block.Pointer.(nostr.EntityPointer); ok {
authorQuery := bleve.NewTermQuery(addr.PublicKey.Hex()[56:])
authorQuery.SetField(labelAuthorField)
authorQuery.SetBoost(2)
orRef := bleve.NewDisjunctionQuery()
orRef.AddQuery(genericRef)
orRef.AddQuery(authorQuery)
ref = orRef
}
and = append(and, ref)
} else {
searchC.WriteString(strings.TrimSpace(block.Text))
}
}
searchContent := searchC.String()
var exactMatches []string
if len(searchContent) > 0 {
contentQueries := make([]bleveQuery.Query, 0, len(b.Languages)+1)
searchQ, exactMatches_, err := parse(searchContent, labelContentField+"_"+b.languageCodes[0])
if err != nil {
for _, code := range b.languageCodes {
match := bleve.NewMatchQuery(searchContent)
match.SetField(labelContentField + "_" + code)
contentQueries = append(contentQueries, match)
}
} else {
contentQueries = append(contentQueries, searchQ)
for _, code := range b.languageCodes[1:] {
searchQ, _, _ := parse(searchContent, labelContentField+"_"+code)
contentQueries = append(contentQueries, searchQ)
}
}
exactMatches = exactMatches_
extrasQ := bleve.NewMatchQuery(searchContent)
extrasQ.SetField(labelExtrasField)
contentQueries = append(contentQueries, extrasQ)
and = append(and, bleveQuery.NewDisjunctionQuery(contentQueries))
}
if len(filter.Kinds) > 0 {
eitherKind := bleve.NewDisjunctionQuery()
for _, kind := range filter.Kinds {
kindQ := bleve.NewTermQuery(strconv.Itoa(int(kind)))
kindQ.SetField(labelKindField)
eitherKind.AddQuery(kindQ)
}
and = append(and, eitherKind)
}
if len(filter.Authors) > 0 {
eitherPubkey := bleve.NewDisjunctionQuery()
for _, pubkey := range filter.Authors {
pubkeyQ := bleve.NewTermQuery(pubkey.Hex()[56:])
pubkeyQ.SetField(labelAuthorField)
eitherPubkey.AddQuery(pubkeyQ)
}
and = append(and, eitherPubkey)
}
if filter.Since != 0 || filter.Until != 0 {
var min time.Time
if filter.Since != 0 {
min = filter.Since.Time()
}
var max time.Time
if filter.Until != 0 {
max = filter.Until.Time()
} else {
max = time.Now()
}
dateRangeQ := bleve.NewDateRangeQuery(min, max)
dateRangeQ.SetField(labelCreatedAtField)
and = append(and, dateRangeQ)
}
q := bleveQuery.NewConjunctionQuery(and)
req := bleve.NewSearchRequest(q)
req.Size = maxLimit
req.From = 0
req.Explain = true
result, err := b.index.Search(req)
if err != nil {
return
}
resultHit:
for _, hit := range result.Hits {
id, err := nostr.IDFromHex(hit.ID)
if err != nil {
continue
}
for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{id}}, 1) {
for _, exactMatch := range exactMatches {
if !strings.Contains(strings.ToLower(evt.Content), exactMatch) {
continue resultHit
}
}
for f, v := range filter.Tags {
if !evt.Tags.ContainsAny(f, v) {
continue resultHit
}
}
if !yield(evt) {
return
}
}
}
}
}
-94
@@ -1,94 +0,0 @@
package bleve
import (
"iter"
"strconv"
"fiatjaf.com/nostr"
bleve "github.com/blevesearch/bleve/v2"
"github.com/blevesearch/bleve/v2/search/query"
)
func (b *BleveBackend) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
return func(yield func(nostr.Event) bool) {
if tlimit := filter.GetTheoreticalLimit(); tlimit == 0 {
return
} else if tlimit < maxLimit {
maxLimit = tlimit
}
if len(filter.Search) < 2 {
return
}
searchQ := bleve.NewMatchQuery(filter.Search)
searchQ.SetField(contentField)
var q query.Query = searchQ
conjQueries := []query.Query{searchQ}
if len(filter.Kinds) > 0 {
eitherKind := bleve.NewDisjunctionQuery()
for _, kind := range filter.Kinds {
kindQ := bleve.NewTermQuery(strconv.Itoa(int(kind)))
kindQ.SetField(kindField)
eitherKind.AddQuery(kindQ)
}
conjQueries = append(conjQueries, eitherKind)
}
if len(filter.Authors) > 0 {
eitherPubkey := bleve.NewDisjunctionQuery()
for _, pubkey := range filter.Authors {
if len(pubkey) != 64 {
continue
}
pubkeyQ := bleve.NewTermQuery(pubkey.Hex()[56:])
pubkeyQ.SetField(pubkeyField)
eitherPubkey.AddQuery(pubkeyQ)
}
conjQueries = append(conjQueries, eitherPubkey)
}
if filter.Since != 0 || filter.Until != 0 {
var min *float64
if filter.Since != 0 {
minVal := float64(filter.Since)
min = &minVal
}
var max *float64
if filter.Until != 0 {
maxVal := float64(filter.Until)
max = &maxVal
}
dateRangeQ := bleve.NewNumericRangeInclusiveQuery(min, max, nil, nil)
dateRangeQ.SetField(createdAtField)
conjQueries = append(conjQueries, dateRangeQ)
}
if len(conjQueries) > 1 {
q = bleve.NewConjunctionQuery(conjQueries...)
}
req := bleve.NewSearchRequest(q)
req.Size = maxLimit
req.From = 0
result, err := b.index.Search(req)
if err != nil {
return
}
for _, hit := range result.Hits {
id, err := nostr.IDFromHex(hit.ID)
if err != nil {
continue
}
for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{id}}, 1) {
if !yield(evt) {
return
}
}
}
}
}
+209
@@ -0,0 +1,209 @@
package bleve
import (
"strings"
bleve "github.com/blevesearch/bleve/v2"
bleveQuery "github.com/blevesearch/bleve/v2/search/query"
)
// token types
type TokenType int
const (
TokenWord TokenType = iota
TokenOR
TokenAND
TokenNOT
TokenLParen
TokenRParen
TokenQuote
TokenEOF
)
type Token struct {
Type TokenType
Value string
}
type Parser struct {
lexer *Lexer
field string
}
func parse(input string, field string) (bleveQuery.Query, []string, error) {
lexer := NewLexer(input)
p := &Parser{
lexer: lexer,
}
var exactMatches []string
var reusableCurrentMatch strings.Builder
var currentExactMatch *strings.Builder
var currentWords []string
var negated bool
var parents []bleveQuery.Query
var parentOps []TokenType // tracks if parent should be AND or OR
var lastOp TokenType = TokenAND // track last operator for parentheses
curr := bleve.NewBooleanQuery()
for {
token := p.lexer.NextToken()
if token.Type == TokenEOF {
if len(currentWords) > 0 {
match := bleve.NewMatchQuery(strings.Join(currentWords, " "))
match.SetOperator(bleveQuery.MatchQueryOperatorAnd)
match.SetField(field)
if negated {
curr.AddMustNot(match)
} else {
curr.AddMust(match)
}
}
break
}
if token.Type == TokenQuote {
if currentExactMatch == nil {
currentExactMatch = &reusableCurrentMatch
} else {
exactMatches = append(exactMatches, currentExactMatch.String())
currentExactMatch.Reset()
reusableCurrentMatch = *currentExactMatch
currentExactMatch = nil
}
continue
}
if currentExactMatch != nil {
if currentExactMatch.Len() > 0 {
currentExactMatch.WriteByte(' ')
}
currentExactMatch.WriteString(strings.ToLower(token.Value))
currentWords = append(currentWords, token.Value)
continue
}
if token.Type == TokenWord {
currentWords = append(currentWords, token.Value)
continue
} else if len(currentWords) > 0 {
match := bleve.NewMatchQuery(strings.Join(currentWords, " "))
match.SetOperator(bleveQuery.MatchQueryOperatorAnd)
match.SetField(field)
if negated {
curr.AddMustNot(match)
} else {
curr.AddMust(match)
}
currentWords = currentWords[:0]
negated = false
}
switch token.Type {
case TokenLParen:
// push current query to parents stack with the last operator
parents = append(parents, curr)
parentOps = append(parentOps, lastOp)
// reset lastOp to default for inner parentheses
lastOp = TokenAND
// start new boolean query for parentheses content
curr = bleve.NewBooleanQuery()
continue
case TokenRParen:
// finalize any remaining words
if len(currentWords) > 0 {
match := bleve.NewMatchQuery(strings.Join(currentWords, " "))
match.SetOperator(bleveQuery.MatchQueryOperatorAnd)
match.SetField(field)
if negated {
curr.AddMustNot(match)
} else {
curr.AddMust(match)
}
currentWords = currentWords[:0]
negated = false
}
// pop parent and merge with current
if len(parents) > 0 {
parent := parents[len(parents)-1]
op := parentOps[len(parentOps)-1]
// create a new boolean query to combine parent and current
var combined bleveQuery.Query
switch op {
case TokenOR:
or := bleve.NewDisjunctionQuery()
or.AddQuery(parent)
or.AddQuery(curr)
combined = or
case TokenAND:
and := bleve.NewConjunctionQuery()
and.AddQuery(parent)
and.AddQuery(curr)
combined = and
}
curr = bleve.NewBooleanQuery()
curr.AddMust(combined)
parents = parents[:len(parents)-1]
parentOps = parentOps[:len(parentOps)-1]
}
continue
}
next := p.lexer.NextToken()
following := p.lexer.PeekToken()
if next.Type == TokenNOT {
negated = true
}
switch token.Type {
case TokenOR:
if next.Type != TokenLParen && !(next.Type == TokenNOT && following.Type == TokenLParen) {
// if this is not followed by a "(" or "NOT (", treat the next word as the only parameter
other := bleve.NewMatchQuery(next.Value)
other.SetOperator(bleveQuery.MatchQueryOperatorAnd)
other.SetField(field)
or := bleve.NewDisjunctionQuery()
or.AddQuery(curr)
or.AddQuery(other)
curr = bleve.NewBooleanQuery()
curr.AddMust(or)
} else {
lastOp = TokenOR
}
case TokenAND:
if next.Type != TokenLParen && !(next.Type == TokenNOT && following.Type == TokenLParen) {
// if this is not followed by a "(" or "NOT (", treat the next word as the only parameter
other := bleve.NewMatchQuery(next.Value)
other.SetOperator(bleveQuery.MatchQueryOperatorAnd)
other.SetField(field)
and := bleve.NewConjunctionQuery()
and.AddQuery(curr)
and.AddQuery(other)
curr = bleve.NewBooleanQuery()
curr.AddMust(and)
} else {
lastOp = TokenAND
}
case TokenNOT:
if next.Type != TokenLParen {
// if this is not followed by a "(", treat the next word as the only parameter
other := bleve.NewMatchQuery(next.Value)
other.SetOperator(bleveQuery.MatchQueryOperatorAnd)
other.SetField(field)
curr.AddMustNot(other)
} else {
negated = true
}
default:
p.lexer.ReturnToken(next)
}
}
return curr, exactMatches, nil
}
+57
@@ -0,0 +1,57 @@
package bleve
import (
"testing"
"github.com/blevesearch/bleve/v2"
"github.com/stretchr/testify/require"
)
func TestParseQuery(t *testing.T) {
mapping := bleve.NewIndexMapping()
mapping.DefaultAnalyzer = "en"
index, err := bleve.NewMemOnly(mapping)
require.NoError(t, err)
docs := []map[string]interface{}{
{"id": "1", "phrase": "I like fruit especially banana and strawberry"},
{"id": "2", "phrase": "I like fruit like apples and oranges"},
{"id": "3", "phrase": "I like vegetables but not fruit"},
{"id": "4", "phrase": "Banana bread is delicious"},
{"id": "5", "phrase": "Strawberry jam and banana smoothie"},
}
for _, doc := range docs {
err := index.Index(doc["id"].(string), doc)
require.NoError(t, err)
}
testQueries := []struct {
query string
expected int
exactMatches []string
}{
{"fruit", 3, nil},
{"banana (NOT delicious)", 2, nil},
{"banana (NOT delicious) bread", 0, nil},
{"smoothie OR apples", 2, nil},
{"smoothie OR apples (NOT fruit)", 1, nil},
{"\"I like\"", 3, []string{"i like"}},
{"banana \"I like fruit\" strawberries", 1, []string{"i like fruit"}},
{"\"I like fruit\" (strawberry OR apple)", 2, []string{"i like fruit"}},
}
for _, test := range testQueries {
query, exactMatches, err := parse(test.query, "phrase")
require.NoError(t, err)
require.Equal(t, test.exactMatches, exactMatches)
search := bleve.NewSearchRequest(query)
results, err := index.Search(search)
require.NoError(t, err)
require.Equal(t, test.expected, int(results.Total),
"query '%s' expected %d results, got %d", test.query, test.expected, results.Total)
}
}
-37
@@ -1,37 +0,0 @@
package bleve
import (
"fmt"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
)
func (b *BleveBackend) ReplaceEvent(evt nostr.Event) error {
b.Lock()
defer b.Unlock()
filter := nostr.Filter{Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if evt.Kind.IsAddressable() {
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}
shouldStore := true
for previous := range b.QueryEvents(filter, 1) {
if nostr.IsOlder(previous, evt) {
if err := b.DeleteEvent(previous.ID); err != nil {
return fmt.Errorf("failed to delete event for replacing: %w", err)
}
} else {
shouldStore = false
}
}
if shouldStore {
if err := b.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
return fmt.Errorf("failed to save: %w", err)
}
}
return nil
}
-23
@@ -1,23 +0,0 @@
package bleve
import (
"fmt"
"strconv"
"fiatjaf.com/nostr"
)
func (b *BleveBackend) SaveEvent(evt nostr.Event) error {
doc := map[string]interface{}{
contentField: evt.Content,
kindField: strconv.Itoa(int(evt.Kind)),
pubkeyField: evt.PubKey.Hex()[56:],
createdAtField: float64(evt.CreatedAt),
}
if err := b.index.Index(evt.ID.Hex(), doc); err != nil {
return fmt.Errorf("failed to index '%s' document: %w", evt.ID, err)
}
return nil
}
+3
@@ -28,6 +28,8 @@ type BoltBackend struct {
MapSize int64
DB *bbolt.DB
ReadOnly bool
EnableHLLCacheFor func(kind nostr.Kind) (useCache bool, skipSavingActualEvent bool)
}
@@ -36,6 +38,7 @@ func (b *BoltBackend) Init() error {
Timeout: 2 * time.Second,
PreLoadFreelist: true,
FreelistType: bbolt.FreelistMapType,
ReadOnly: b.ReadOnly,
})
if err != nil {
return err
+8 -6
@@ -8,8 +8,8 @@ import (
"go.etcd.io/bbolt"
)
-func (b *BoltBackend) ReplaceEvent(evt nostr.Event) error {
-return b.DB.Update(func(txn *bbolt.Tx) error {
+func (b *BoltBackend) ReplaceEvent(evt nostr.Event) (deleted []nostr.Event, err error) {
+err = b.DB.Update(func(txn *bbolt.Tx) error {
rawBucket := txn.Bucket(rawEventStore)
// check if we already have this id
@@ -25,12 +25,12 @@ func (b *BoltBackend) ReplaceEvent(evt nostr.Event) error {
}
// now we fetch the past events, whatever they are, delete them and then save the new
-var err error
+var qerr error
var results iter.Seq[nostr.Event] = func(yield func(nostr.Event) bool) {
-err = b.query(txn, filter, 10 /* in theory limit could be just 1 and this should work */, yield)
+qerr = b.query(txn, filter, 10 /* in theory limit could be just 1 and this should work */, yield)
}
-if err != nil {
-return fmt.Errorf("failed to query past events with %s: %w", filter, err)
+if qerr != nil {
+return fmt.Errorf("failed to query past events with %s: %w", filter, qerr)
}
shouldStore := true
@@ -39,6 +39,7 @@ func (b *BoltBackend) ReplaceEvent(evt nostr.Event) error {
if err := b.delete(txn, previous.ID); err != nil {
return fmt.Errorf("failed to delete event %s for replacing: %w", previous.ID, err)
}
deleted = append(deleted, previous)
} else {
// there is a newer event already stored, so we won't store this
shouldStore = false
@@ -50,4 +51,5 @@ func (b *BoltBackend) ReplaceEvent(evt nostr.Event) error {
return nil
})
return deleted, err
}
+6 -6
@@ -40,12 +40,12 @@ func Marshal(evt nostr.Event, buf []byte) error {
buf[0] = 0
if evt.Kind > MaxKind {
return fmt.Errorf("kind is too big: %d, max is %d", evt.Kind, MaxKind)
return fmt.Errorf("kind is too big: %d, max is %d", evt.Kind, uint16(MaxKind))
}
binary.LittleEndian.PutUint16(buf[1:3], uint16(evt.Kind))
if evt.CreatedAt > MaxCreatedAt {
return fmt.Errorf("created_at is too big: %d, max is %d", evt.CreatedAt, MaxCreatedAt)
return fmt.Errorf("created_at is too big: %d, max is %d", evt.CreatedAt, uint32(MaxCreatedAt))
}
binary.LittleEndian.PutUint32(buf[3:7], uint32(evt.CreatedAt))
@@ -58,7 +58,7 @@ func Marshal(evt nostr.Event, buf []byte) error {
ntags := len(evt.Tags)
if ntags > MaxTagCount {
return fmt.Errorf("can't encode too many tags: %d, max is %d", ntags, MaxTagCount)
return fmt.Errorf("can't encode too many tags: %d, max is %d", ntags, uint16(MaxTagCount))
}
binary.LittleEndian.PutUint16(buf[137:139], uint16(ntags))
@@ -68,7 +68,7 @@ func Marshal(evt nostr.Event, buf []byte) error {
itemCount := len(tag)
if itemCount > MaxTagItemCount {
return fmt.Errorf("can't encode a tag with so many items: %d, max is %d", itemCount, MaxTagItemCount)
return fmt.Errorf("can't encode a tag with so many items: %d, max is %d", itemCount, uint8(MaxTagItemCount))
}
buf[tagBase+tagOffset] = uint8(itemCount)
@@ -76,7 +76,7 @@ func Marshal(evt nostr.Event, buf []byte) error {
for _, item := range tag {
itemSize := len(item)
if itemSize > MaxTagItemSize {
return fmt.Errorf("tag item is too large: %d, max is %d", itemSize, MaxTagItemSize)
return fmt.Errorf("tag item is too large: %d, max is %d", itemSize, uint16(MaxTagItemSize))
}
binary.LittleEndian.PutUint16(buf[tagBase+tagOffset+itemOffset:], uint16(itemSize))
@@ -91,7 +91,7 @@ func Marshal(evt nostr.Event, buf []byte) error {
// content
if contentLength := len(evt.Content); contentLength > MaxContentSize {
return fmt.Errorf("content is too large: %d, max is %d", contentLength, MaxContentSize)
return fmt.Errorf("content is too large: %d, max is %d", contentLength, uint16(MaxContentSize))
} else {
binary.LittleEndian.PutUint16(buf[tagBase+tagsSectionLength:], uint16(contentLength))
}
-2
@@ -2,7 +2,6 @@ package checks
import (
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/bleve"
"fiatjaf.com/nostr/eventstore/boltdb"
"fiatjaf.com/nostr/eventstore/lmdb"
"fiatjaf.com/nostr/eventstore/mmm"
@@ -13,5 +12,4 @@ var (
_ eventstore.Store = (*lmdb.LMDBBackend)(nil)
_ eventstore.Store = (*mmm.IndexingLayer)(nil)
_ eventstore.Store = (*boltdb.BoltBackend)(nil)
_ eventstore.Store = (*bleve.BleveBackend)(nil)
)
+8 -14
@@ -36,18 +36,15 @@ func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) {
// we already have a k and a v and an err from the cursor setup, so check and use these
if it.exhausted ||
it.err != nil ||
-len(it.key) != q.keySize ||
+len(it.key) != len(q.prefix)+4 ||
!bytes.HasPrefix(it.key, q.prefix) {
// either iteration has errored or we reached the end of this prefix
break // stop this cursor and move to the next one
}
// "id" indexes don't contain a timestamp
if q.dbi != b.indexId {
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
if createdAt < since {
break
}
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
if createdAt < since {
break
}
if extraAuthors == nil && extraKinds == nil && extraTagValues == nil {
@@ -129,18 +126,15 @@ func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *
for {
// we already have a k and a v and an err from the cursor setup, so check and use these
if it.err != nil ||
-len(it.key) != q.keySize ||
+len(it.key) != len(q.prefix)+4 ||
!bytes.HasPrefix(it.key, q.prefix) {
// either iteration has errored or we reached the end of this prefix
break // stop this cursor and move to the next one
}
// "id" indexes don't contain a timestamp
if q.dbi != b.indexId {
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
if createdAt < since {
break
}
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
if createdAt < since {
break
}
// fetch actual event (we need it regardless because we need the pubkey for the hll)
+1 -1
@@ -45,7 +45,7 @@ func (it *iterator) pull(n int, since uint32) {
return
}
-if len(it.key) != query.keySize || !bytes.HasPrefix(it.key, query.prefix) {
+if len(it.key) != len(query.prefix)+4 || !bytes.HasPrefix(it.key, query.prefix) {
// we reached the end of this prefix
it.exhausted = true
return
+1 -27
@@ -4,7 +4,6 @@ import (
"encoding/binary"
"fmt"
"os"
"sync/atomic"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
@@ -34,8 +33,6 @@ type LMDBBackend struct {
hllCache lmdb.DBI
EnableHLLCacheFor func(kind nostr.Kind) (useCache bool, skipSavingActualEvent bool)
lastId atomic.Uint32
}
func (b *LMDBBackend) Init() error {
@@ -112,7 +109,7 @@ func (b *LMDBBackend) initialize() error {
env.SetMapSize(b.MapSize)
}
-if err := env.Open(b.Path, lmdb.NoTLS|lmdb.WriteMap|b.extraFlags, 0644); err != nil {
+if err := env.Open(b.Path, lmdb.NoTLS|b.extraFlags, 0644); err != nil {
return err
}
b.lmdbEnv = env
@@ -186,28 +183,5 @@ func (b *LMDBBackend) initialize() error {
return err
}
// get lastId
if err := b.lmdbEnv.View(func(txn *lmdb.Txn) error {
txn.RawRead = true
cursor, err := txn.OpenCursor(b.rawEventStore)
if err != nil {
return err
}
defer cursor.Close()
k, _, err := cursor.Get(nil, nil, lmdb.Last)
if lmdb.IsNotFound(err) {
// nothing found, so we're at zero
return nil
}
if err != nil {
return err
}
b.lastId.Store(binary.BigEndian.Uint32(k))
return nil
}); err != nil {
return err
}
return b.migrate()
}
-1
@@ -54,7 +54,6 @@ func (b *LMDBBackend) queryByIds(txn *lmdb.Txn, ids []nostr.ID, yield func(nostr
continue
}
-txn.Get(b.rawEventStore, idx)
bin, err := txn.Get(b.rawEventStore, idx)
if err != nil {
continue
+43 -36
@@ -14,7 +14,6 @@ type query struct {
i int
dbi lmdb.DBI
prefix []byte
keySize int
startingPoint []byte
}
@@ -40,10 +39,10 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
}
}
for i, q := range queries {
-sp := make([]byte, len(q.prefix))
-sp = sp[0:len(q.prefix)]
-copy(sp, q.prefix)
-queries[i].startingPoint = binary.BigEndian.AppendUint32(sp, uint32(until))
+sp := make([]byte, len(q.prefix)+4)
+copy(sp[0:len(q.prefix)], q.prefix)
+binary.BigEndian.PutUint32(sp[len(q.prefix):], uint32(until))
+queries[i].startingPoint = sp
}
}()
@@ -64,39 +63,27 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
}
// only "p" tag has a goodness of 2, so
-if goodness == 2 {
+if goodness == 2 && filter.Kinds != nil {
// this means we got a "p" tag, so we will use the ptag-kind index
i := 0
-if filter.Kinds != nil {
-queries = make([]query, len(tagValues)*len(filter.Kinds))
-for _, value := range tagValues {
-if len(value) != 64 {
-return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
-}
-for _, kind := range filter.Kinds {
-k := make([]byte, 8+2)
-if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
-return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
-}
-binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
-queries[i] = query{i: i, dbi: b.indexPTagKind, prefix: k[0 : 8+2], keySize: 8 + 2 + 4}
-i++
-}
-}
-} else {
-// even if there are no kinds, in that case we will just return any kind and not care
-queries = make([]query, len(tagValues))
-for i, value := range tagValues {
-if len(value) != 64 {
-return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
-}
-k := make([]byte, 8)
-if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
-return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
-}
-queries[i] = query{i: i, dbi: b.indexPTagKind, prefix: k[0:8], keySize: 8 + 2 + 4}
-}
-}
+queries = make([]query, len(tagValues)*len(filter.Kinds))
+for _, value := range tagValues {
+if len(value) != 64 {
+return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
+}
+for _, kind := range filter.Kinds {
+k := make([]byte, 8+2)
+if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
+return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
+}
+binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
+queries[i] = query{
+i: i,
+dbi: b.indexPTagKind,
+prefix: k[0 : 8+2],
+}
+i++
+}
+}
} else {
@@ -107,7 +94,11 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
dbi, k, offset := b.getTagIndexPrefix(tagKey, value)
// remove the last part to get just the prefix we want here
prefix := k[0:offset]
queries[i] = query{i: i, dbi: dbi, prefix: prefix, keySize: len(prefix) + 4}
queries[i] = query{
i: i,
dbi: dbi,
prefix: prefix,
}
}
// add an extra kind filter if available (only do this on plain tag index, not on ptag-kind index)
@@ -142,7 +133,11 @@ pubkeyMatching:
// will use pubkey index
queries = make([]query, len(filter.Authors))
for i, pk := range filter.Authors {
queries[i] = query{i: i, dbi: b.indexPubkey, prefix: pk[0:8], keySize: 8 + 4}
queries[i] = query{
i: i,
dbi: b.indexPubkey,
prefix: pk[0:8],
}
}
} else {
// will use pubkeyKind index
@@ -153,7 +148,11 @@ pubkeyMatching:
prefix := make([]byte, 8+2)
copy(prefix[0:8], pk[0:8])
binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind))
queries[i] = query{i: i, dbi: b.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4}
queries[i] = query{
i: i,
dbi: b.indexPubkeyKind,
prefix: prefix[0 : 8+2],
}
i++
}
}
@@ -170,7 +169,11 @@ pubkeyMatching:
for i, kind := range filter.Kinds {
prefix := make([]byte, 2)
binary.BigEndian.PutUint16(prefix[0:2], uint16(kind))
queries[i] = query{i: i, dbi: b.indexKind, prefix: prefix[0:2], keySize: 2 + 4}
queries[i] = query{
i: i,
dbi: b.indexKind,
prefix: prefix[0:2],
}
}
// potentially with an extra useless tag filtering
@@ -181,6 +184,10 @@ pubkeyMatching:
// if we got here our query will have nothing to filter with
queries = make([]query, 1)
prefix := make([]byte, 0)
queries[0] = query{i: 0, dbi: b.indexCreatedAt, prefix: prefix, keySize: 0 + 4}
queries[0] = query{
i: 0,
dbi: b.indexCreatedAt,
prefix: prefix,
}
return queries, nil, nil, "", nil, since, nil
}
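Taken together, these hunks drop the redundant `keySize` field: every timestamped index key is just the query prefix followed by a 4-byte big-endian `created_at`, so the expected key length is always `len(prefix)+4`. A minimal standalone sketch of that layout (the prefix and timestamp values here are illustrative, not from the repo):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// a timestamped index key is: prefix || 4-byte big-endian created_at
	prefix := []byte{0x00, 0x01} // e.g. a 2-byte kind prefix
	until := uint32(1700000000)

	// startingPoint = prefix || until, exactly as built in prepareQueries
	sp := make([]byte, len(prefix)+4)
	copy(sp[0:len(prefix)], prefix)
	binary.BigEndian.PutUint32(sp[len(prefix):], until)

	// a cursor key belongs to this query iff it is prefix-length plus
	// four timestamp bytes long and actually starts with the prefix
	key := sp
	ok := len(key) == len(prefix)+4 && bytes.HasPrefix(key, prefix)
	createdAt := binary.BigEndian.Uint32(key[len(key)-4:])
	fmt.Println(ok, createdAt) // true 1700000000
}
```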
+12 -14
@@ -2,14 +2,13 @@ package lmdb
import (
"fmt"
"iter"
"fiatjaf.com/nostr"
"github.com/PowerDNS/lmdb-go/lmdb"
)
func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) (deleted []nostr.Event, err error) {
err = b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
// check if we already have this id
_, existsErr := txn.Get(b.indexId, evt.ID[0:8])
if existsErr == nil {
@@ -26,24 +25,21 @@ func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
}
// now we fetch the past events, whatever they are, delete them and then save the new
var err error
var results iter.Seq[nostr.Event] = func(yield func(nostr.Event) bool) {
err = b.query(txn, filter, 10 /* in theory limit could be just 1 and this should work */, yield)
}
if err != nil {
return fmt.Errorf("failed to query past events with %s: %w", filter, err)
}
shouldStore := true
for previous := range results {
if qerr := b.query(txn, filter, 10 /* could be just 1 */, func(previous nostr.Event) bool {
if nostr.IsOlder(previous, evt) {
if err := b.delete(txn, previous.ID); err != nil {
return fmt.Errorf("failed to delete event %s for replacing: %w", previous.ID, err)
if qerr := b.delete(txn, previous.ID); qerr != nil {
qerr = fmt.Errorf("failed to delete event %s for replacing: %w", previous.ID, qerr)
return false
}
deleted = append(deleted, previous)
} else {
// there is a newer event already stored, so we won't store this
shouldStore = false
}
return true
}); qerr != nil {
return fmt.Errorf("failed to query past events with %s: %w", filter, qerr)
}
if shouldStore {
return b.save(txn, evt)
@@ -51,4 +47,6 @@ func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
return nil
})
return deleted, err
}
+4 -7
@@ -33,18 +33,15 @@ func (il *IndexingLayer) CountEvents(filter nostr.Filter) (uint32, error) {
// we already have a k and a v and an err from the cursor setup, so check and use these
if it.exhausted ||
it.err != nil ||
len(it.key) != q.keySize ||
len(it.key) != len(q.prefix)+4 ||
!bytes.HasPrefix(it.key, q.prefix) {
// either iteration has errored or we reached the end of this prefix
break // stop this cursor and move to the next one
}
// "id" indexes don't contain a timestamp
if q.timestampSize == 4 {
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
if createdAt < since {
break
}
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
if createdAt < since {
break
}
if extraAuthors == nil && extraKinds == nil && extraTagValues == nil {
+1 -2
@@ -116,8 +116,7 @@ func (b *MultiMmapManager) Rescan() error {
}
}
b.freeRanges, err = b.gatherFreeRanges(mmmtxn)
if err != nil {
if err := b.gatherFreeRanges(mmmtxn); err != nil {
return err
}
+69 -16
@@ -8,10 +8,12 @@ import (
"github.com/PowerDNS/lmdb-go/lmdb"
)
func (b *MultiMmapManager) gatherFreeRanges(txn *lmdb.Txn) (positions, error) {
const LARGE_FREERANGE = 142
func (b *MultiMmapManager) gatherFreeRanges(txn *lmdb.Txn) error {
cursor, err := txn.OpenCursor(b.indexId)
if err != nil {
return nil, fmt.Errorf("failed to open cursor on indexId: %w", err)
return fmt.Errorf("failed to open cursor on indexId: %w", err)
}
defer cursor.Close()
@@ -28,31 +30,35 @@ func (b *MultiMmapManager) gatherFreeRanges(txn *lmdb.Txn) (positions, error) {
usedPositions = append(usedPositions, position{start: b.mmapfEnd, size: 0})
// calculate free ranges as gaps between used positions
freeRanges := make(positions, 0, len(usedPositions)/2)
b.freeRangesAll = make(positions, 0, len(usedPositions))
b.freeRangesLarge = make([]position, 0, len(usedPositions)/10)
var currentStart uint64 = 0
for _, used := range usedPositions {
if used.start > currentStart {
// gap from currentStart to pos.start
freeSize := used.start - currentStart
if freeSize > 0 {
freeRanges = append(freeRanges, position{
fr := position{
start: currentStart,
size: uint32(freeSize),
})
}
b.freeRangesAll = append(b.freeRangesAll, fr)
if fr.isLarge() {
b.freeRangesLarge = append(b.freeRangesLarge, fr)
}
}
}
currentStart = used.start + uint64(used.size)
}
return freeRanges, nil
return nil
}
func (b *MultiMmapManager) mergeNewFreeRange(newFreeRange position) {
// use binary search to find the insertion point for the new pos
idx, exists := slices.BinarySearchFunc(b.freeRanges, newFreeRange.start, func(item position, target uint64) int {
idx, exists := slices.BinarySearchFunc(b.freeRangesAll, newFreeRange.start, func(item position, target uint64) int {
return cmp.Compare(item.start, target)
})
if exists {
panic(fmt.Errorf("can't add free range that already exists: %s", newFreeRange))
}
@@ -62,7 +68,7 @@ func (b *MultiMmapManager) mergeNewFreeRange(newFreeRange position) {
// check the range immediately before
if idx > 0 {
before := b.freeRanges[idx-1]
before := b.freeRangesAll[idx-1]
if before.start+uint64(before.size) == newFreeRange.start {
deleteStart = idx - 1
deleting++
@@ -72,8 +78,8 @@ func (b *MultiMmapManager) mergeNewFreeRange(newFreeRange position) {
}
// check the range immediately after
if idx < len(b.freeRanges) {
after := b.freeRanges[idx]
if idx < len(b.freeRangesAll) {
after := b.freeRangesAll[idx]
if newFreeRange.start+uint64(newFreeRange.size) == after.start {
if deleteStart == -1 {
deleteStart = idx
@@ -87,13 +93,60 @@ func (b *MultiMmapManager) mergeNewFreeRange(newFreeRange position) {
switch deleting {
case 0:
// if we are not deleting anything we must insert the new free range
b.freeRanges = slices.Insert(b.freeRanges, idx, newFreeRange)
b.freeRangesAll = slices.Insert(b.freeRangesAll, idx, newFreeRange)
// if it's large add it to the list of large free ranges
if newFreeRange.isLarge() {
b.freeRangesLarge = append(b.freeRangesLarge, newFreeRange)
}
case 1:
deleted := b.freeRangesAll[deleteStart]
// if we're deleting a single range, don't delete it, modify it in-place instead.
b.freeRanges[deleteStart] = newFreeRange
b.freeRangesAll[deleteStart] = newFreeRange
// if the range we're modifying is in the list of large ranges, modify it there too
if deleted.isLarge() {
for i, large := range b.freeRangesLarge {
if large.start == deleted.start {
b.freeRangesLarge[i] = newFreeRange
break
}
}
} else if newFreeRange.isLarge() {
// otherwise: if after modification it's big enough we should add it to the list of large ranges
b.freeRangesLarge = append(b.freeRangesLarge, newFreeRange)
}
case 2:
// now if we're deleting two ranges, delete just one instead and modify the other in place
b.freeRanges[deleteStart] = newFreeRange
b.freeRanges = slices.Delete(b.freeRanges, deleteStart+1, deleteStart+1+1)
// now if we're deleting two ranges, delete the second instead and modify the first in place
first := b.freeRangesAll[deleteStart]
second := b.freeRangesAll[deleteStart+1]
b.freeRangesAll = slices.Delete(b.freeRangesAll, deleteStart+1, deleteStart+1+1)
b.freeRangesAll[deleteStart] = newFreeRange
// if the second was in the list of large ranges, delete it from there too
if second.isLarge() {
for i, large := range b.freeRangesLarge {
if large.start == second.start {
b.freeRangesLarge[i] = b.freeRangesLarge[len(b.freeRangesLarge)-1]
b.freeRangesLarge = b.freeRangesLarge[0 : len(b.freeRangesLarge)-1]
break
}
}
}
// if the range we're modifying (the first) is already in the list of large ranges, modify it there too
if first.isLarge() {
for i, large := range b.freeRangesLarge {
if large.start == first.start {
b.freeRangesLarge[i] = newFreeRange
break
}
}
} else if newFreeRange.isLarge() {
// otherwise, if after modification it has become big enough, we should add it to the list of large ranges
b.freeRangesLarge = append(b.freeRangesLarge, newFreeRange)
}
}
}
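The split into `freeRangesAll` (sorted by start, used for merging) and `freeRangesLarge` (unsorted, used for allocation) is what makes the swap-deletes above legal: order is irrelevant in the large list, so removal is O(1). A self-contained sketch of that idiom, with made-up values:

```go
package main

import "fmt"

type position struct {
	start uint64
	size  uint32
}

// swapDelete removes the range starting at `start` from an unsorted
// slice by moving the last element into its slot and shrinking by one
func swapDelete(large []position, start uint64) []position {
	for i, p := range large {
		if p.start == start {
			large[i] = large[len(large)-1]
			return large[:len(large)-1]
		}
	}
	return large
}

func main() {
	large := []position{{0, 200}, {500, 300}, {900, 150}}
	large = swapDelete(large, 500)
	fmt.Println(large) // [{0 200} {900 150}]
}
```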
+75 -7
@@ -45,7 +45,7 @@ func FuzzFreeRanges(f *testing.F) {
total := 0
for {
freeBefore, spaceBefore := countUsableFreeRanges(mmmm)
freeBefore, spaceBefore := countUsableFreeRanges(t, mmmm)
hasAdded := false
for i := range rnd.IntN(40) {
@@ -69,7 +69,7 @@ func FuzzFreeRanges(f *testing.F) {
total++
}
freeAfter, spaceAfter := countUsableFreeRanges(mmmm)
freeAfter, spaceAfter := countUsableFreeRanges(t, mmmm)
if hasAdded && freeBefore > 0 {
require.Lessf(t, spaceAfter, spaceBefore, "must use some of the existing free ranges when inserting new events (before: %d, after: %d)", freeBefore, freeAfter)
}
@@ -86,9 +86,35 @@ func FuzzFreeRanges(f *testing.F) {
}
}
verifyFreeRangesInvariants(t, mmmm)
// add more events
for i := range rnd.IntN(40) {
content := "1"
if i > 0 {
content = strings.Repeat("z", rnd.IntN(1000))
}
evt := nostr.Event{
CreatedAt: nostr.Timestamp(rnd.Uint32()),
Kind: 1,
Content: content,
Tags: nostr.Tags{},
}
evt.Sign(sk)
err := il.SaveEvent(evt)
require.NoError(t, err)
total++
}
verifyFreeRangesInvariants(t, mmmm)
mmmm.lmdbEnv.View(func(txn *lmdb.Txn) error {
expectedFreeRanges, _ := mmmm.gatherFreeRanges(txn)
require.Equalf(t, expectedFreeRanges, mmmm.freeRanges, "expected %s, got %s", expectedFreeRanges, mmmm.freeRanges)
before := mmmm.freeRangesAll
err := mmmm.gatherFreeRanges(txn)
require.NoError(t, err)
require.Equalf(t, mmmm.freeRangesAll, before, "expected %s, got %s", before, mmmm.freeRangesAll)
return nil
})
@@ -99,12 +125,54 @@ func FuzzFreeRanges(f *testing.F) {
})
}
func countUsableFreeRanges(mmmm *MultiMmapManager) (count int, space int) {
for _, fr := range mmmm.freeRanges {
if fr.size >= 142 {
func countUsableFreeRanges(t *testing.T, mmmm *MultiMmapManager) (count int, space int) {
for _, fr := range mmmm.freeRangesAll {
if fr.size >= LARGE_FREERANGE {
count++
space += int(fr.size)
}
}
require.Equal(t, count, len(mmmm.freeRangesLarge))
return count, space
}
func verifyFreeRangesInvariants(t *testing.T, mmmm *MultiMmapManager) {
all := mmmm.freeRangesAll
large := mmmm.freeRangesLarge
for _, l := range large {
found := false
for _, a := range all {
if l.start == a.start && l.size == a.size {
found = true
break
}
}
require.True(t, found, "large range %v not found in all ranges", l)
}
for i := 1; i < len(all); i++ {
require.Greater(t, all[i].start, all[i-1].start, "all ranges should be sorted by start")
}
for i := range all {
for j := i + 1; j < len(all); j++ {
end1 := all[i].start + uint64(all[i].size)
end2 := all[j].start + uint64(all[j].size)
require.False(t, (all[i].start >= all[j].start && all[i].start < end2) ||
(all[j].start >= all[i].start && all[j].start < end1),
"ranges %v and %v overlap", all[i], all[j])
}
}
mmmm.lmdbEnv.View(func(txn *lmdb.Txn) error {
before := make(positions, len(mmmm.freeRangesAll))
copy(before, mmmm.freeRangesAll)
err := mmmm.gatherFreeRanges(txn)
require.NoError(t, err)
require.Equal(t, before, mmmm.freeRangesAll, "recomputing free ranges should yield the same result")
return nil
})
}
+2 -2
@@ -41,7 +41,7 @@ func (it *iterator) pull(n int, since uint32) {
return
}
if len(it.key) != it.query.keySize || !bytes.HasPrefix(it.key, it.query.prefix) {
if len(it.key) != len(it.query.prefix)+4 || !bytes.HasPrefix(it.key, it.query.prefix) {
// we reached the end of this prefix
it.exhausted = true
return
@@ -226,7 +226,7 @@ func (il *IndexingLayer) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
return
}
// now the p-tag+kind+date
// now the p-tag+kind+date
if dbi == il.indexTag32 && tag[0] == "p" {
k := make([]byte, 8+2+4)
xhex.Decode(k[0:8], []byte(tag[1][0:8*2]))
+1 -1
@@ -61,7 +61,7 @@ func (il *IndexingLayer) Init() error {
env.SetMaxDBs(9)
env.SetMaxReaders(1000)
env.SetMapSize(1 << 38) // ~273GB
env.SetMapSize(MMAP_INFINITE_SIZE)
// create directory if it doesn't exist and open it
if err := os.MkdirAll(path, 0755); err != nil {
+46 -24
@@ -35,13 +35,15 @@ type MultiMmapManager struct {
mmapfEnd uint64
writeMutex sync.Mutex
lockfile *os.File
lmdbEnv *lmdb.Env
stuff lmdb.DBI
knownLayers lmdb.DBI
indexId lmdb.DBI
freeRanges positions
freeRangesAll positions // sorted by position
freeRangesLarge []position // unsorted
}
func (b *MultiMmapManager) String() string {
@@ -49,33 +51,43 @@ func (b *MultiMmapManager) String() string {
}
const (
MMAP_INFINITE_SIZE = 1 << 40
MMAP_INFINITE_SIZE = 100_000_000_000
maxuint16 = 65535
maxuint32 = 4294967295
)
func (b *MultiMmapManager) Init() error {
func (b *MultiMmapManager) Init() (err error) {
if b.Logger == nil {
nopLogger := zerolog.Nop()
b.Logger = &nopLogger
}
defer func() {
if err != nil {
b.releaseLock()
}
}()
// create directory if it doesn't exist
dbpath := filepath.Join(b.Dir, "mmmm")
if err := os.MkdirAll(dbpath, 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %w", dbpath, err)
}
if !b.ReadOnly {
// create lockfile to prevent multiple instances
lockfilePath := filepath.Join(b.Dir, "mmmm.lock")
if _, err := os.OpenFile(lockfilePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644); err != nil {
if os.IsExist(err) {
return fmt.Errorf("database at %s is already in use by another instance", b.Dir)
}
return fmt.Errorf("failed to create lockfile %s: %w", lockfilePath, err)
}
// lock database directory to prevent multiple instances
lockfilePath := filepath.Join(b.Dir, "mmmm.lock")
lockfile, err := os.OpenFile(lockfilePath, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return fmt.Errorf("failed to open lockfile %s: %w", lockfilePath, err)
}
if err := syscall.Flock(int(lockfile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
lockfile.Close()
if errors.Is(err, syscall.EWOULDBLOCK) || errors.Is(err, syscall.EAGAIN) {
return fmt.Errorf("database at %s is already in use by another instance", b.Dir)
}
return fmt.Errorf("failed to lock database at %s: %w", b.Dir, err)
}
b.lockfile = lockfile
// open a huge mmapped file
b.mmapfPath = filepath.Join(b.Dir, "events")
@@ -83,7 +95,7 @@ func (b *MultiMmapManager) Init() error {
if err != nil {
return fmt.Errorf("failed to open events file at %s: %w", b.mmapfPath, err)
}
mmapf, err := syscall.Mmap(int(file.Fd()), 0, MMAP_INFINITE_SIZE,
mmapf, err := syscall.Mmap(int(file.Fd()), 0, int(MMAP_INFINITE_SIZE),
syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
if err != nil {
return fmt.Errorf("failed to mmap events file at %s: %w", b.mmapfPath, err)
@@ -104,7 +116,7 @@ func (b *MultiMmapManager) Init() error {
env.SetMaxDBs(3)
env.SetMaxReaders(1000)
env.SetMapSize(1 << 38) // ~273GB
env.SetMapSize(MMAP_INFINITE_SIZE)
err = env.Open(dbpath, lmdb.NoTLS, 0644)
if err != nil {
@@ -139,18 +151,17 @@ func (b *MultiMmapManager) Init() error {
if !b.ReadOnly {
// scan index table to calculate free ranges from used positions
b.freeRanges, err = b.gatherFreeRanges(txn)
if err != nil {
if err := b.gatherFreeRanges(txn); err != nil {
return err
}
logOp := b.Logger.Debug()
for _, pos := range b.freeRanges {
if pos.size > 20 {
logOp = logOp.Uint32(fmt.Sprintf("%d", pos.start), pos.size)
}
count := 0
for _, pos := range b.freeRangesLarge {
logOp = logOp.Uint32(fmt.Sprintf("%d", pos.start), pos.size)
count++
}
logOp.Msg("calculated free ranges from index scan")
logOp.Int("count", count).Msg("calculated free ranges from index scan")
}
return nil
@@ -365,6 +376,19 @@ func (b *MultiMmapManager) getNextAvailableLayerId(txn *lmdb.Txn) (uint16, error
return id, nil
}
func (b *MultiMmapManager) releaseLock() {
if b.lockfile == nil {
return
}
_ = syscall.Flock(int(b.lockfile.Fd()), syscall.LOCK_UN)
_ = b.lockfile.Close()
b.lockfile = nil
lockfilePath := filepath.Join(b.Dir, "mmmm.lock")
_ = os.Remove(lockfilePath)
}
func (b *MultiMmapManager) Close() {
b.lmdbEnv.Close()
for _, il := range b.layers {
@@ -373,7 +397,5 @@ func (b *MultiMmapManager) Close() {
syscall.Munmap(b.mmapf)
// remove lockfile
lockfilePath := filepath.Join(b.Dir, "mmmm.lock")
os.Remove(lockfilePath)
b.releaseLock()
}
+19 -2
@@ -1,22 +1,35 @@
package mmm
import (
"cmp"
"encoding/binary"
"fmt"
"slices"
"strings"
)
type positions []position
func (poss positions) find(start uint64) (idx int) {
idx, _ = slices.BinarySearchFunc(poss, start, func(item position, target uint64) int {
return cmp.Compare(item.start, target)
})
return idx
}
func (poss positions) del(start uint64) positions {
idx := poss.find(start)
return slices.Delete(poss, idx, idx+1)
}
func (poss positions) String() string {
str := strings.Builder{}
str.Grow(10 + 20*len(poss))
str.WriteString("positions:[")
for _, pos := range poss {
str.WriteByte(' ')
str.WriteString(pos.String())
}
str.WriteString(" ]")
str.WriteString("]")
return str.String()
}
@@ -29,6 +42,10 @@ func (pos position) String() string {
return fmt.Sprintf("<%d|%d|%d>", pos.start, pos.size, pos.start+uint64(pos.size))
}
func (pos position) isLarge() bool {
return pos.size >= LARGE_FREERANGE
}
func positionFromBytes(posb []byte) position {
return position{
size: binary.BigEndian.Uint32(posb[0:4]),
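The new `find`/`del` helpers only work because `freeRangesAll` stays sorted by `start`, letting a binary search on that field locate a range in O(log n). A usage sketch under that assumption:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type position struct {
	start uint64
	size  uint32
}

type positions []position

// find mirrors the helper above: binary search on the start field,
// valid only while the slice is kept sorted by start
func (poss positions) find(start uint64) int {
	idx, _ := slices.BinarySearchFunc(poss, start, func(item position, target uint64) int {
		return cmp.Compare(item.start, target)
	})
	return idx
}

// del removes the range that starts at `start`, preserving order
func (poss positions) del(start uint64) positions {
	idx := poss.find(start)
	return slices.Delete(poss, idx, idx+1)
}

func main() {
	poss := positions{{0, 10}, {100, 20}, {300, 5}}
	fmt.Println(poss.find(300)) // 2
	fmt.Println(poss.del(100))  // [{0 10} {300 5}]
}
```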
+11 -3
@@ -7,6 +7,7 @@ import (
"log"
"math"
"slices"
"sync"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary"
@@ -14,6 +15,12 @@ import (
"github.com/PowerDNS/lmdb-go/lmdb"
)
var tempResultsPool = sync.Pool{
New: func() any {
return make([]nostr.Event, 0, 64)
},
}
// GetByID returns the event -- if found in this mmm -- and all the IndexingLayers it belongs to.
func (b *MultiMmapManager) GetByID(id nostr.ID) (*nostr.Event, IndexingLayers) {
var event *nostr.Event
@@ -140,7 +147,8 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int, yi
numberOfIteratorsToPullOnEachRound := max(1, int(math.Ceil(float64(len(iterators))/float64(12))))
totalEventsEmitted := 0
tempResults := make([]nostr.Event, 0, batchSizePerQuery*2)
tempResults := tempResultsPool.Get().([]nostr.Event)
defer tempResultsPool.Put(tempResults[:0])
for len(iterators) > 0 {
// reset stuff
@@ -180,8 +188,8 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int, yi
// decode the entire thing
event := nostr.Event{}
if err := betterbinary.Unmarshal(bin, &event); err != nil {
log.Printf("lmdb: value read error (id %x) on query prefix %x sp %x dbi %v: %s\n",
betterbinary.GetID(bin), iterators[i].query.prefix, iterators[i].query.startingPoint, iterators[i].query.dbi, err)
log.Printf("mmm: value read error (id %s) on query prefix %x sp %x dbi %v: %s\n",
betterbinary.GetID(bin).Hex(), iterators[i].query.prefix, iterators[i].query.startingPoint, iterators[i].query.dbi, err)
continue
}
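The `tempResultsPool` change swaps a per-query allocation for a shared `sync.Pool` of scratch slices. A generic sketch of the pattern follows (the pooled element type is illustrative). Note the same caveat as in the code above: the deferred `Put` evaluates its argument immediately, so if `append` later reallocates past the original capacity, the grown array is simply not returned to the pool.

```go
package main

import (
	"fmt"
	"sync"
)

var slicePool = sync.Pool{
	New: func() any { return make([]int, 0, 64) },
}

func useScratch() {
	scratch := slicePool.Get().([]int)
	// the argument is evaluated now, so this returns the original
	// backing array with length reset to zero for the next caller
	defer slicePool.Put(scratch[:0])

	scratch = append(scratch, 1, 2, 3)
	fmt.Println(len(scratch), cap(scratch)) // 3 64
}

func main() { useScratch() }
```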
+43 -39
@@ -14,8 +14,6 @@ type query struct {
i int
dbi lmdb.DBI
prefix []byte
keySize int
timestampSize int
startingPoint []byte
}
@@ -41,10 +39,10 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
}
}
for i, q := range queries {
sp := make([]byte, len(q.prefix))
sp = sp[0:len(q.prefix)]
copy(sp, q.prefix)
queries[i].startingPoint = binary.BigEndian.AppendUint32(sp, uint32(until))
sp := make([]byte, len(q.prefix)+4)
copy(sp[0:len(q.prefix)], q.prefix)
binary.BigEndian.PutUint32(sp[len(q.prefix):], uint32(until))
queries[i].startingPoint = sp
}
}()
@@ -65,39 +63,27 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
}
// only "p" tag has a goodness of 2, so
if goodness == 2 {
if goodness == 2 && filter.Kinds != nil {
// this means we got a "p" tag, so we will use the ptag-kind index
i := 0
if filter.Kinds != nil {
queries = make([]query, len(tagValues)*len(filter.Kinds))
for _, value := range tagValues {
if len(value) != 64 {
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
}
for _, kind := range filter.Kinds {
k := make([]byte, 8+2)
if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
}
binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
queries[i] = query{i: i, dbi: il.indexPTagKind, prefix: k[0 : 8+2], keySize: 8 + 2 + 4, timestampSize: 4}
i++
}
queries = make([]query, len(tagValues)*len(filter.Kinds))
for _, value := range tagValues {
if len(value) != 64 {
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
}
} else {
// even if there are no kinds, in that case we will just return any kind and not care
queries = make([]query, len(tagValues))
for i, value := range tagValues {
if len(value) != 64 {
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
}
k := make([]byte, 8)
for _, kind := range filter.Kinds {
k := make([]byte, 8+2)
if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
}
queries[i] = query{i: i, dbi: il.indexPTagKind, prefix: k[0:8], keySize: 8 + 2 + 4, timestampSize: 4}
binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
queries[i] = query{
i: i,
dbi: il.indexPTagKind,
prefix: k[0 : 8+2],
}
i++
}
}
} else {
@@ -108,7 +94,11 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
dbi, k, offset := il.getTagIndexPrefix(tagKey, value)
// remove the last part to get just the prefix we want here
prefix := k[0:offset]
queries[i] = query{i: i, dbi: dbi, prefix: prefix, keySize: len(prefix) + 4, timestampSize: 4}
queries[i] = query{
i: i,
dbi: dbi,
prefix: prefix,
}
}
// add an extra kind filter if available (only do this on plain tag index, not on ptag-kind index)
@@ -143,9 +133,11 @@ pubkeyMatching:
// will use pubkey index
queries = make([]query, len(filter.Authors))
for i, pk := range filter.Authors {
prefix := make([]byte, 8)
copy(prefix[0:8], pk[0:8])
queries[i] = query{i: i, dbi: il.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4}
queries[i] = query{
i: i,
dbi: il.indexPubkey,
prefix: pk[0:8],
}
}
} else {
// will use pubkeyKind index
@@ -156,7 +148,11 @@ pubkeyMatching:
prefix := make([]byte, 8+2)
copy(prefix[0:8], pk[0:8])
binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind))
queries[i] = query{i: i, dbi: il.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4}
queries[i] = query{
i: i,
dbi: il.indexPubkeyKind,
prefix: prefix[0 : 8+2],
}
i++
}
}
@@ -173,7 +169,11 @@ pubkeyMatching:
for i, kind := range filter.Kinds {
prefix := make([]byte, 2)
binary.BigEndian.PutUint16(prefix[0:2], uint16(kind))
queries[i] = query{i: i, dbi: il.indexKind, prefix: prefix[0:2], keySize: 2 + 4, timestampSize: 4}
queries[i] = query{
i: i,
dbi: il.indexKind,
prefix: prefix[0:2],
}
}
// potentially with an extra useless tag filtering
@@ -184,6 +184,10 @@ pubkeyMatching:
// if we got here our query will have nothing to filter with
queries = make([]query, 1)
prefix := make([]byte, 0)
queries[0] = query{i: 0, dbi: il.indexCreatedAt, prefix: prefix, keySize: 0 + 4, timestampSize: 4}
queries[0] = query{
i: 0,
dbi: il.indexCreatedAt,
prefix: prefix,
}
return queries, nil, nil, "", nil, since, nil
}
+18 -16
@@ -9,9 +9,9 @@ import (
"github.com/PowerDNS/lmdb-go/lmdb"
)
func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) (deleted []nostr.Event, err error) {
if il.mmmm.ReadOnly {
return ReadOnly
return nil, ReadOnly
}
il.mmmm.writeMutex.Lock()
@@ -29,7 +29,7 @@ func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
// prepare transactions
mmmtxn, err := il.mmmm.lmdbEnv.BeginTxn(nil, 0)
if err != nil {
return err
return nil, err
}
defer func() {
// defer abort but only if we haven't committed (we'll set it to nil after committing)
@@ -41,7 +41,7 @@ func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
iltxn, err := il.lmdbEnv.BeginTxn(nil, 0)
if err != nil {
return err
return nil, err
}
defer func() {
// defer abort but only if we haven't committed (we'll set it to nil after committing)
@@ -54,33 +54,35 @@ func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
// check if we already have this id
_, existsErr := mmmtxn.Get(il.mmmm.indexId, evt.ID[0:8])
if existsErr == nil {
return nil
return nil, nil
}
if !lmdb.IsNotFound(existsErr) {
return fmt.Errorf("error checking existence: %w", existsErr)
return nil, fmt.Errorf("error checking existence: %w", existsErr)
}
// now we fetch the past events, whatever they are, delete them and then save the new
var qerr error
var results iter.Seq[nostr.Event] = func(yield func(nostr.Event) bool) {
err = il.query(iltxn, filter, 10 /* in theory limit could be just 1 and this should work */, yield)
qerr = il.query(iltxn, filter, 10 /* in theory limit could be just 1 and this should work */, yield)
}
if err != nil {
return fmt.Errorf("failed to query past events with %s: %w", filter, err)
if qerr != nil {
return nil, fmt.Errorf("failed to query past events with %s: %w", filter, qerr)
}
var acquiredFreeRangeFromDelete *position
shouldStore := true
for previous := range results {
if nostr.IsOlder(previous, evt) {
if pos, shouldPurge, err := il.delete(mmmtxn, iltxn, previous.ID); err != nil {
return fmt.Errorf("failed to delete event %s for replacing: %w", previous.ID, err)
if pos, shouldPurge, derr := il.delete(mmmtxn, iltxn, previous.ID); derr != nil {
return nil, fmt.Errorf("failed to delete event %s for replacing: %w", previous.ID, derr)
} else if shouldPurge {
// purge
if err := mmmtxn.Del(il.mmmm.indexId, previous.ID[0:8], nil); err != nil {
return err
return nil, err
}
acquiredFreeRangeFromDelete = &pos
}
deleted = append(deleted, previous)
} else {
// there is a newer event already stored, so we won't store this
shouldStore = false
@@ -90,17 +92,17 @@ func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
if shouldStore {
_, err := il.mmmm.storeOn(mmmtxn, iltxn, il, evt)
if err != nil {
return err
return nil, err
}
}
// commit in this order to minimize problematic inconsistencies
if err := mmmtxn.Commit(); err != nil {
return fmt.Errorf("can't commit mmmtxn: %w", err)
return nil, fmt.Errorf("can't commit mmmtxn: %w", err)
}
mmmtxn = nil
if err := iltxn.Commit(); err != nil {
return fmt.Errorf("can't commit iltxn: %w", err)
return nil, fmt.Errorf("can't commit iltxn: %w", err)
}
iltxn = nil
@@ -110,5 +112,5 @@ func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
il.mmmm.mergeNewFreeRange(*acquiredFreeRangeFromDelete)
}
return nil
return deleted, nil
}
+22 -7
@@ -5,7 +5,6 @@ import (
"fmt"
"os"
"runtime"
"slices"
"syscall"
"unsafe"
@@ -104,25 +103,41 @@ func (b *MultiMmapManager) storeOn(
return false, fmt.Errorf("event too large to store, max %d, got %d", 1<<16, pos.size)
}
// find a suitable place for this to be stored in
// find a suitable place for this to be stored in (search only large free ranges)
appendToMmap := true
for f, fr := range b.freeRanges {
for f, fr := range b.freeRangesLarge {
if fr.size >= pos.size {
// found the smallest possible place that can fit this event
// found a place that can fit this event
appendToMmap = false
pos.start = fr.start
// modify the free ranges we're keeping track of
// (in case of conflict we lose this free range but it's ok, it will be recovered on the next startup)
if pos.size == fr.size {
// if we've used it entirely just delete it
b.freeRanges = slices.Delete(b.freeRanges, f, f+1)
// if we've used it entirely just delete it (swap-delete since it's unsorted)
b.freeRangesLarge[f] = b.freeRangesLarge[len(b.freeRangesLarge)-1]
b.freeRangesLarge = b.freeRangesLarge[0 : len(b.freeRangesLarge)-1]
// also delete it from b.freeRangesAll
b.freeRangesAll = b.freeRangesAll.del(fr.start)
} else {
// otherwise modify it in place
b.freeRanges[f] = position{
newFreeRange := position{
start: fr.start + uint64(pos.size),
size: fr.size - pos.size,
}
// only keep it in freeRangesLarge if it's still large enough
if newFreeRange.size >= LARGE_FREERANGE {
b.freeRangesLarge[f] = newFreeRange
} else {
// remove it from freeRangesLarge if it's no longer large enough
b.freeRangesLarge[f] = b.freeRangesLarge[len(b.freeRangesLarge)-1]
b.freeRangesLarge = b.freeRangesLarge[0 : len(b.freeRangesLarge)-1]
}
// also modify it in b.freeRangesAll
idx := b.freeRangesAll.find(fr.start)
b.freeRangesAll[idx] = newFreeRange
}
break
+2
@@ -42,6 +42,8 @@ func (il *IndexingLayer) ComputeStats(opts StatsOptions) (EventStats, error) {
}
err := il.lmdbEnv.View(func(txn *lmdb.Txn) error {
txn.RawRead = true
cursor, err := txn.OpenCursor(il.indexPubkeyKind)
if err != nil {
return err
+2 -2
@@ -29,8 +29,8 @@ func (b NullStore) SaveEvent(evt nostr.Event) error {
return nil
}
func (b NullStore) ReplaceEvent(evt nostr.Event) error {
return nil
func (b NullStore) ReplaceEvent(evt nostr.Event) ([]nostr.Event, error) {
return nil, nil
}
func (b NullStore) CountEvents(filter nostr.Filter) (uint32, error) {
+5 -4
@@ -122,7 +122,7 @@ func (b *SliceStore) delete(id nostr.ID) error {
return nil
}
func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
func (b *SliceStore) ReplaceEvent(evt nostr.Event) (deleted []nostr.Event, err error) {
b.Lock()
defer b.Unlock()
@@ -135,8 +135,9 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
for previous := range b.QueryEvents(filter, 1) {
if nostr.IsOlder(previous, evt) {
if err := b.delete(previous.ID); err != nil {
return fmt.Errorf("failed to delete event for replacing: %w", err)
return nil, fmt.Errorf("failed to delete event for replacing: %w", err)
}
deleted = append(deleted, previous)
} else {
shouldStore = false
}
@@ -144,11 +145,11 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
if shouldStore {
if err := b.save(evt); err != nil && err != eventstore.ErrDupEvent {
return fmt.Errorf("failed to save: %w", err)
return nil, fmt.Errorf("failed to save: %w", err)
}
}
return nil
return deleted, nil
}
func eventTimestampComparator(e nostr.Event, t nostr.Timestamp) int {
+1 -1
@@ -26,7 +26,7 @@ type Store interface {
// ReplaceEvent atomically replaces a replaceable or addressable event.
// Conceptually it is like a Query->Delete->Save, but streamlined.
ReplaceEvent(nostr.Event) error
ReplaceEvent(nostr.Event) (deleted []nostr.Event, err error)
// CountEvents counts all events that match a given filter
CountEvents(nostr.Filter) (uint32, error)
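With the new signature, callers get back the events that were displaced. A hedged sketch of consuming it — `notifyDeleted` is a hypothetical callback, not part of the library:

```go
package sketch

import (
	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
)

// replaceAndNotify stores evt via ReplaceEvent and reports every older
// event that was removed to make room for it
func replaceAndNotify(db eventstore.Store, evt nostr.Event, notifyDeleted func(nostr.Event)) error {
	deleted, err := db.ReplaceEvent(evt)
	if err != nil {
		return err
	}
	for _, old := range deleted {
		notifyDeleted(old)
	}
	return nil
}
```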
+24 -6
@@ -128,6 +128,24 @@ func basicTest(t *testing.T, db eventstore.Store) {
require.Len(t, results, 1)
require.Equal(t, events[5].ID, results[0].ID, "author + kind query error")
}
// test 5: until
{
results := slices.Collect(db.QueryEvents(nostr.Filter{Until: 102}, 1000))
require.NoError(t, err)
require.Len(t, results, 3)
resultsWithTag := slices.Collect(db.QueryEvents(nostr.Filter{
Until: 102,
Tags: nostr.TagMap{
"e": []string{
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
},
}, 1000))
require.NoError(t, err)
require.Len(t, resultsWithTag, 1)
}
}
// from another-basic-test.patch
@@ -223,7 +241,7 @@ func basicTest(t *testing.T, db eventstore.Store) {
}
originalProfile.Sign(sk3)
err = db.ReplaceEvent(originalProfile)
_, err = db.ReplaceEvent(originalProfile)
require.NoError(t, err)
// verify
@@ -244,7 +262,7 @@ func basicTest(t *testing.T, db eventstore.Store) {
newProfile.Sign(sk3)
// replace with newer event
err = db.ReplaceEvent(newProfile)
_, err = db.ReplaceEvent(newProfile)
require.NoError(t, err)
// verify only the newer event exists
@@ -264,7 +282,7 @@ func basicTest(t *testing.T, db eventstore.Store) {
}
olderProfile.Sign(sk3)
err = db.ReplaceEvent(olderProfile)
_, err = db.ReplaceEvent(olderProfile)
require.NoError(t, err)
// verify the newer event is still there
@@ -284,7 +302,7 @@ func basicTest(t *testing.T, db eventstore.Store) {
}
articleV1.Sign(sk3)
err = db.ReplaceEvent(articleV1)
_, err = db.ReplaceEvent(articleV1)
require.NoError(t, err)
// verify article was saved
@@ -305,7 +323,7 @@ func basicTest(t *testing.T, db eventstore.Store) {
}
articleV2.Sign(sk3)
err = db.ReplaceEvent(articleV2)
_, err = db.ReplaceEvent(articleV2)
require.NoError(t, err)
// verify only the newer version exists
@@ -327,7 +345,7 @@ func basicTest(t *testing.T, db eventstore.Store) {
}
differentArticle.Sign(sk3)
err = db.ReplaceEvent(differentArticle)
_, err = db.ReplaceEvent(differentArticle)
require.NoError(t, err)
// verify both articles exist (different d tags)
+1
@@ -33,6 +33,7 @@ var tests = []struct {
{"manyauthors", manyAuthorsTest},
{"unbalanced", unbalancedTest},
{"count", countTest},
{"pfilter-until", pTagUntilMismatchTest},
}
func TestSliceStore(t *testing.T) {
+92
@@ -0,0 +1,92 @@
package test
import (
"encoding/hex"
"slices"
"testing"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"github.com/stretchr/testify/require"
)
func pTagUntilMismatchTest(t *testing.T, db eventstore.Store) {
err := db.Init()
require.NoError(t, err)
targetP := "460c25e682fda7832b52d1f22d3d22b3176d972f60dcdc3212ed8c92ef85065c"
author := nostr.MustPubKeyFromHex("7fa56f5d6962ab1e3cd424e758c3002b8665f7b0d8dcee9fe9e288d7751ac194")
events := []nostr.Event{
{
Kind: 9802,
ID: nostr.MustIDFromHex("2c997233fa580b1a831f989d8fa320c409f8412d9c75b819c9a29df102d7f901"),
PubKey: author,
CreatedAt: 1773835689,
Tags: nostr.Tags{
{"e", "bcaa6599e69cff48ed6ab4b0b315d4f33a869a4ba8fa808287700faebe17195f", "wss://nos.lol/", "source"},
{"p", targetP, "", "author"},
},
Content: "With so few people donating in the zap the devs button, the incentives are quite low to produce cool new things",
Sig: sigFromHex(t, "b49206476c4d2a5f44590331541c83910fd826c0f4cdab99ceffd5bcf3aca94935e3db9d7820e7db3a0f1165c43a28dd3173a81fd08bf8348629ea4efde02537"),
},
{
Kind: 9802,
ID: nostr.MustIDFromHex("31c1eddb3a5201ef1bbce91b9fb3b7d8fe3e3eb25a66bedadcbc93c84d072c7d"),
PubKey: author,
CreatedAt: 1773154080,
Tags: nostr.Tags{
{"p", "5ea4648045bb1ff222655ddd36e6dceddc43590c26090c486bef38ef450da5bd", "", "mention"},
{"p", "c8fb0d3aa788b9ace4f6cb92dd97d3f292db25b5c9f92462ef6c64926129fbaf", "", "mention"},
{"p", "2f29aa33c2a3b45c2ef32212879248b2f4a49a002bd0de0fa16c94e138ac6f13", "", "mention"},
{"p", targetP, "", "mention"},
{"comment", "normie"},
{"e", "5911eeba39a6886fe8abea82bb50612d27d1273d63904c9b64cde070c7088d48", "wss://relay.primal.net/", "source"},
{"p", "3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24", "", "author"},
},
Content: "grimoire is cool, but it's too nerdy for me",
Sig: sigFromHex(t, "0ee01515d54293d52fa1247a395e64c8499df96eee80c30204cb7c8fc5b5977023e6ac00cc240f137ce7e594818545340bf74bce6d3de86539f1a5d26fe33f24"),
},
{
Kind: 9802,
ID: nostr.MustIDFromHex("baeb90e2075c9d8a9b41286dbf1c52e5ef8ad6c030118839ce24d065e72df9b7"),
PubKey: author,
CreatedAt: 1773154058,
Tags: nostr.Tags{
{"p", "c8fb0d3aa788b9ace4f6cb92dd97d3f292db25b5c9f92462ef6c64926129fbaf", "", "mention"},
{"p", "2f29aa33c2a3b45c2ef32212879248b2f4a49a002bd0de0fa16c94e138ac6f13", "", "mention"},
{"p", targetP, "", "mention"},
{"p", "3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24", "", "mention"},
{"comment", "no lies detected"},
{"e", "81171c564cedbc5f07e5b7a9d06842d1f43a81cd79c8755921190382de55c514", "wss://nos.lol/", "source"},
{"p", "5ea4648045bb1ff222655ddd36e6dceddc43590c26090c486bef38ef450da5bd", "", "author"},
},
Content: "i never used grimoire once in my life, but that is not the point, it is still the best client",
Sig: sigFromHex(t, "3e4b855c4e3a4d2b3a078d593e728f1bbdb07af91ba5831b7866e2d16df90ce58a5f9f1db5733603911b20b334bc7fc5ae1482c7870b9f7acde4a8ccc080a79d"),
},
}
for _, evt := range events {
err = db.SaveEvent(evt)
require.NoError(t, err)
}
results := slices.Collect(db.QueryEvents(nostr.Filter{
Until: 1733934976,
Limit: 3,
Tags: nostr.TagMap{"p": []string{targetP}},
}, 1000))
require.Len(t, results, 0)
}
func sigFromHex(t *testing.T, sigStr string) [64]byte {
t.Helper()
raw, err := hex.DecodeString(sigStr)
require.NoError(t, err)
require.Len(t, raw, 64)
var sig [64]byte
copy(sig[:], raw)
return sig
}
+41
@@ -0,0 +1,41 @@
package wrappers
import (
"context"
"fmt"
"iter"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
)
var _ nostr.Publisher = DynamicPublisher{}
type DynamicPublisher struct {
GetStore func() eventstore.Store
MaxLimit int
}
func (w DynamicPublisher) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
return w.GetStore().QueryEvents(filter, w.MaxLimit)
}
func (w DynamicPublisher) Publish(ctx context.Context, evt nostr.Event) error {
if evt.Kind.IsEphemeral() {
return nil
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if evt.Kind.IsRegular() {
if err := w.GetStore().SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
return fmt.Errorf("failed to save: %w", err)
} else {
return err
}
}
_, err := w.GetStore().ReplaceEvent(evt)
return err
}
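A wiring sketch for the new wrapper — the concrete store, limit, and the `wrappers` import path are assumptions for illustration; the point of `GetStore` is that it may return a different store on every call:

```go
package sketch

import (
	"fiatjaf.com/nostr/eventstore"
	"fiatjaf.com/nostr/eventstore/slicestore"
	"fiatjaf.com/nostr/eventstore/wrappers"
)

func newDynamicPublisher() wrappers.DynamicPublisher {
	// a store that could be swapped out at runtime; DynamicPublisher
	// re-resolves it through GetStore on every query and publish
	store := &slicestore.SliceStore{}
	store.Init()

	return wrappers.DynamicPublisher{
		GetStore: func() eventstore.Store { return store },
		MaxLimit: 500,
	}
}
```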
+2 -1
@@ -39,5 +39,6 @@ func (w StorePublisher) Publish(ctx context.Context, evt nostr.Event) error {
}
// others are replaced
return w.Store.ReplaceEvent(evt)
_, err := w.Store.ReplaceEvent(evt)
return err
}
+1
@@ -44,6 +44,7 @@ func (ef Filter) Matches(event Event) bool {
return true
}
//go:inline
func (ef Filter) MatchesIgnoringTimestampConstraints(event Event) bool {
if ef.IDs != nil && !slices.Contains(ef.IDs, event.ID) {
return false
+4
@@ -40,8 +40,10 @@ require (
)
require (
fiatjaf.com/lib v0.3.7
github.com/dgraph-io/ristretto/v2 v2.3.0
github.com/go-git/go-git/v5 v5.16.3
github.com/pemistahl/lingua-go v1.4.0
github.com/sivukhin/godjot v1.0.6
github.com/templexxx/cpu v0.0.1
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
@@ -63,6 +65,7 @@ require (
github.com/blevesearch/scorch_segment_api/v2 v2.2.16 // indirect
github.com/blevesearch/segment v0.9.1 // indirect
github.com/blevesearch/snowballstem v0.9.0 // indirect
github.com/blevesearch/stempel v0.2.0 // indirect
github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
github.com/blevesearch/vellum v1.0.11 // indirect
github.com/blevesearch/zapx/v11 v11.3.10 // indirect
@@ -93,6 +96,7 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
+8
@@ -1,3 +1,5 @@
fiatjaf.com/lib v0.3.7 h1:mXZOn7NrUcjSdy4oNvwQyAmes7Ueb+Zr5hjqMIe2dxI=
fiatjaf.com/lib v0.3.7/go.mod h1:UlHaZvPHj25PtKLh9GjZkUHRmQ2xZ8Jkoa4VRaLeeQ8=
github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e h1:ahyvB3q25YnZWly5Gq1ekg6jcmWaGj/vG/MhF4aisoc=
github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw=
github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc=
@@ -40,6 +42,8 @@ github.com/blevesearch/segment v0.9.1 h1:+dThDy+Lvgj5JMxhmOVlgFfkUtZV2kw49xax4+j
github.com/blevesearch/segment v0.9.1/go.mod h1:zN21iLm7+GnBHWTao9I+Au/7MBiL8pPFtJBJTsk6kQw=
github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
github.com/blevesearch/stempel v0.2.0 h1:CYzVPaScODMvgE9o+kf6D4RJ/VRomyi9uHF+PtB+Afc=
github.com/blevesearch/stempel v0.2.0/go.mod h1:wjeTHqQv+nQdbPuJ/YcvOjTInA2EIc6Ks1FoSUzSLvc=
github.com/blevesearch/upsidedown_store_api v1.0.2 h1:U53Q6YoWEARVLd1OYNc9kvhBMGZzVrdmaozG2MfoB+A=
github.com/blevesearch/upsidedown_store_api v1.0.2/go.mod h1:M01mh3Gpfy56Ps/UXHjEO/knbqyQ1Oamg8If49gRwrQ=
github.com/blevesearch/vellum v1.0.11 h1:SJI97toEFTtA9WsDZxkyGTaBWFdWl1n2LEDCXLCq/AU=
@@ -190,6 +194,8 @@ github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/pemistahl/lingua-go v1.4.0 h1:ifYhthrlW7iO4icdubwlduYnmwU37V1sbNrwhKBR4rM=
github.com/pemistahl/lingua-go v1.4.0/go.mod h1:ECuM1Hp/3hvyh7k8aWSqNCPlTxLemFZsRjocUf3KgME=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -207,6 +213,8 @@ github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc=
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sivukhin/godjot v1.0.6 h1:yoRD+hlcDbSxP9Gd/KRVlEFXgtGyZyt0CHwhY6Gk3EQ=
github.com/sivukhin/godjot v1.0.6/go.mod h1:wA6KdR4Z+XpwdwyViPDLWYYxT72pKjNc6XGA9I025gM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-40
@@ -92,46 +92,6 @@ func similarPublicKey(as, bs []PubKey) bool {
return true
}
// Escaping strings for JSON encoding according to RFC8259.
// Also encloses result in quotation marks "".
func escapeString(dst []byte, s string) []byte {
dst = append(dst, '"')
for i := 0; i < len(s); i++ {
c := s[i]
switch {
case c == '"':
// quotation mark
dst = append(dst, []byte{'\\', '"'}...)
case c == '\\':
// reverse solidus
dst = append(dst, []byte{'\\', '\\'}...)
case c >= 0x20:
// default, rest below are control chars
dst = append(dst, c)
case c == 0x08:
dst = append(dst, []byte{'\\', 'b'}...)
case c < 0x09:
dst = append(dst, []byte{'\\', 'u', '0', '0', '0', '0' + c}...)
case c == 0x09:
dst = append(dst, []byte{'\\', 't'}...)
case c == 0x0a:
dst = append(dst, []byte{'\\', 'n'}...)
case c == 0x0c:
dst = append(dst, []byte{'\\', 'f'}...)
case c == 0x0d:
dst = append(dst, []byte{'\\', 'r'}...)
case c < 0x10:
dst = append(dst, []byte{'\\', 'u', '0', '0', '0', 0x57 + c}...)
case c < 0x1a:
dst = append(dst, []byte{'\\', 'u', '0', '0', '1', 0x20 + c}...)
case c < 0x20:
dst = append(dst, []byte{'\\', 'u', '0', '0', '1', 0x47 + c}...)
}
}
dst = append(dst, '"')
return dst
}
func subIdToSerial(subId string) int64 {
n := strings.Index(subId, ":")
if n < 0 || n > len(subId) {
+3
@@ -78,6 +78,9 @@ func (rl *Relay) handleDeleteRequest(ctx context.Context, evt nostr.Event) error
}
haveDeletedSomething = true
if rl.OnEventDeleted != nil {
rl.OnEventDeleted(ctx, target)
}
return nil
})
} else {
+265
@@ -0,0 +1,265 @@
package khatru
import (
"slices"
"strconv"
"testing"
"fiatjaf.com/nostr"
"github.com/stretchr/testify/require"
)
func TestDispatcherCandidates(t *testing.T) {
d := newDispatcher()
d.addSubscription(subscription{
id: "...",
filter: nostr.Filter{
Kinds: []nostr.Kind{9},
Tags: nostr.TagMap{"h": []string{"aaa"}},
},
})
d.addSubscription(subscription{
id: "...",
filter: nostr.Filter{
Kinds: []nostr.Kind{11},
Tags: nostr.TagMap{"h": []string{"aaa"}},
},
})
d.addSubscription(subscription{
id: "...",
filter: nostr.Filter{
Kinds: []nostr.Kind{9, 11, 1111},
Tags: nostr.TagMap{"h": []string{"aaa"}},
},
})
d.addSubscription(subscription{
id: "...",
filter: nostr.Filter{
Kinds: []nostr.Kind{9, 11, 1111},
Tags: nostr.TagMap{"h": []string{"bbb"}},
},
})
d.addSubscription(subscription{
id: "...",
filter: nostr.Filter{
Kinds: []nostr.Kind{9, 11, 1111},
Authors: []nostr.PubKey{
nostr.MustPubKeyFromHex("87f5650744bed197fcb170ae05fd8d1948a24b2aac34cedf7bdb1c47d6d93273"),
},
},
})
matched := 0
for range d.candidates(nostr.Event{
PubKey: nostr.MustPubKeyFromHex("87f5650744bed197fcb170ae05fd8d1948a24b2aac34cedf7bdb1c47d6d93273"),
ID: nostr.MustIDFromHex("87f5650744bed197fcb170ae05fd8d1948a24b2aac34cedf7bdb1c47d6d93273"),
Kind: 9,
CreatedAt: nostr.Now(),
Content: "hello",
Tags: nostr.Tags{
{"h", "aaa"},
},
}) {
matched++
}
require.Equal(t, 3, matched)
}
func FuzzDispatcherCandidates(f *testing.F) {
f.Add(1, 1, uint8(8), uint8(16))
f.Add(2, 3, uint8(32), uint8(32))
f.Fuzz(func(t *testing.T, seed int, advance int, ops uint8, checks uint8) {
d := newDispatcher()
state := fuzzState{value: seed, advance: advance}
active := make(map[int]subscription)
activeSSIDs := make([]int, 0, int(ops))
nextSubID := 0
steps := int(ops) + 1
for range steps {
if len(activeSSIDs) == 0 || state.next(10) != 0 {
nextSubID++
sub := subscription{
id: strconv.Itoa(nextSubID),
filter: fuzzDispatcherFilter(&state),
}
ssid := d.addSubscription(sub)
active[ssid] = sub
activeSSIDs = append(activeSSIDs, ssid)
} else {
idx := state.next(len(activeSSIDs))
ssid := activeSSIDs[idx]
d.removeSubscription(ssid)
delete(active, ssid)
activeSSIDs = append(activeSSIDs[:idx], activeSSIDs[idx+1:]...)
}
for range int(checks%7) + 1 {
event := fuzzDispatcherEvent(&state)
expected := expectedDispatcherCandidates(active, event)
actual := collectedDispatcherCandidates(&d, event)
require.Equalf(t, expected, actual, "seed=%d advance=%d event=%s active=%v", seed, advance, event.String(), active)
}
}
for _, ssid := range activeSSIDs {
d.removeSubscription(ssid)
delete(active, ssid)
}
require.Empty(t, collectedDispatcherCandidates(&d, fuzzDispatcherEvent(&state)))
})
}
type fuzzState struct {
value int
advance int
}
func (state *fuzzState) next(n int) int {
if n <= 0 {
return 0
}
value := state.value % n
if value < 0 {
value += n
}
state.value += state.advance
return value
}
func fuzzDispatcherFilter(seed *fuzzState) nostr.Filter {
filter := nostr.Filter{
Authors: fuzzDispatcherAuthors(seed),
Kinds: fuzzDispatcherKinds(seed),
Tags: fuzzDispatcherTagMap(seed),
}
if seed.next(3) == 0 {
since := nostr.Timestamp(seed.next(6))
until := since + nostr.Timestamp(seed.next(6))
filter.Since = since
filter.Until = until
} else if seed.next(4) == 0 {
filter.Since = nostr.Timestamp(seed.next(8))
} else if seed.next(4) == 0 {
filter.Until = nostr.Timestamp(seed.next(8))
}
return filter
}
func fuzzDispatcherAuthors(seed *fuzzState) []nostr.PubKey {
switch seed.next(4) {
case 0:
return nil
case 1:
return []nostr.PubKey{}
}
count := seed.next(3) + 1
authors := make([]nostr.PubKey, 0, count)
for range count {
pk := nostr.PubKey{byte(seed.next(4) + 1)}
if !slices.Contains(authors, pk) {
authors = append(authors, pk)
}
}
return authors
}
func fuzzDispatcherKinds(seed *fuzzState) []nostr.Kind {
switch seed.next(4) {
case 0:
return nil
case 1:
return []nostr.Kind{}
}
count := seed.next(3) + 1
kinds := make([]nostr.Kind, 0, count)
for range count {
kind := nostr.Kind(seed.next(5) + 1)
if !slices.Contains(kinds, kind) {
kinds = append(kinds, kind)
}
}
return kinds
}
func fuzzDispatcherTagMap(seed *fuzzState) nostr.TagMap {
if seed.next(3) == 0 {
return nil
}
keys := []string{"e", "p", "t"}
values := []string{"a", "b", "c", "d"}
count := seed.next(3)
if count == 0 {
return nil
}
tags := make(nostr.TagMap, count)
start := seed.next(len(keys))
for i := range count {
idx := (start + i) % len(keys)
valueCount := seed.next(3) + 1
entries := make([]string, 0, valueCount)
for range valueCount {
value := values[seed.next(len(values))]
if !slices.Contains(entries, value) {
entries = append(entries, value)
}
}
tags[keys[idx]] = entries
}
return tags
}
func fuzzDispatcherEvent(seed *fuzzState) nostr.Event {
tags := make(nostr.Tags, 0, seed.next(4))
keys := []string{"e", "p", "t"}
values := []string{"a", "b", "c", "d"}
for range cap(tags) {
tags = append(tags, nostr.Tag{keys[seed.next(len(keys))], values[seed.next(len(values))]})
}
return nostr.Event{
PubKey: nostr.PubKey{byte(seed.next(4) + 1)},
Kind: nostr.Kind(seed.next(5) + 1),
CreatedAt: nostr.Timestamp(seed.next(8)),
Tags: tags,
}
}
func expectedDispatcherCandidates(active map[int]subscription, event nostr.Event) []string {
ids := make([]string, 0, len(active))
for _, sub := range active {
if sub.filter.Matches(event) {
ids = append(ids, sub.id)
}
}
slices.Sort(ids)
return ids
}
func collectedDispatcherCandidates(d *dispatcher, event nostr.Event) []string {
ids := make([]string, 0, d.subscriptions.Size())
for sub := range d.candidates(event) {
ids = append(ids, sub.id)
}
slices.Sort(ids)
return ids
}
-58
@@ -1,58 +0,0 @@
---
outline: deep
---
# Request Routing
If you have one (or more) set of policies that must be executed in sequence (for example, first you check for the presence of a tag, then later policies use that tag without checking) and they only apply to some class of events, but you still want your relay to handle other classes of events too, that can lead to cumbersome sets of rules: always having to check whether an event meets the requirements, and so on. That is where routing can help you.
```go
sk := os.Getenv("RELAY_SECRET_KEY")
// a relay for NIP-29 groups
groupsStore := boltdb.BoltBackend{}
groupsStore.Init()
groupsRelay, _ := khatru29.Init(relay29.Options{Domain: "example.com", DB: groupsStore, SecretKey: sk})
// ...
// a relay for everything else
publicStore := slicestore.SliceStore{}
publicStore.Init()
publicRelay := khatru.NewRelay()
publicRelay.UseEventStore(publicStore, 1000)
// ...
// a higher-level relay that just routes between the two above
router := khatru.NewRouter()
// route requests and events to the groups relay
router.Route().
Req(func (filter nostr.Filter) bool {
_, hasHTag := filter.Tags["h"]
if hasHTag {
return true
}
return slices.ContainsFunc(filter.Kinds, func(k nostr.Kind) bool { return k == 39000 || k == 39001 || k == 39002 })
}).
Event(func (event *nostr.Event) bool {
switch {
case event.Kind <= 9021 && event.Kind >= 9000:
return true
case event.Kind <= 39010 && event.Kind >= 39000:
return true
case event.Kind <= 12 && event.Kind >= 9:
return true
case event.Tags.Find("h") != nil:
return true
default:
return false
}
}).
Relay(groupsRelay)
// route requests and events to the other
router.Route().
Req(func (filter nostr.Filter) bool { return true }).
Event(func (event *nostr.Event) bool { return true }).
Relay(publicRelay)
```
-61
@@ -1,61 +0,0 @@
package main
import (
"fmt"
"net/http"
"slices"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/lmdb"
"fiatjaf.com/nostr/eventstore/slicestore"
"fiatjaf.com/nostr/khatru"
)
func main() {
db1 := &slicestore.SliceStore{}
db1.Init()
r1 := khatru.NewRelay()
r1.UseEventstore(db1, 400)
db2 := &lmdb.LMDBBackend{Path: "/tmp/t"}
db2.Init()
r2 := khatru.NewRelay()
r2.UseEventstore(db2, 400)
db3 := &slicestore.SliceStore{}
db3.Init()
r3 := khatru.NewRelay()
r3.UseEventstore(db3, 400)
router := khatru.NewRouter()
router.Route().
Req(func(filter nostr.Filter) bool {
return slices.Contains(filter.Kinds, 30023)
}).
Event(func(event *nostr.Event) bool {
return event.Kind == 30023
}).
Relay(r1)
router.Route().
Req(func(filter nostr.Filter) bool {
return slices.Contains(filter.Kinds, 1) && slices.Contains(filter.Tags["t"], "spam")
}).
Event(func(event *nostr.Event) bool {
return event.Kind == 1 && event.Tags.FindWithValue("t", "spam") != nil
}).
Relay(r2)
router.Route().
Req(func(filter nostr.Filter) bool {
return slices.Contains(filter.Kinds, 1)
}).
Event(func(event *nostr.Event) bool {
return event.Kind == 1
}).
Relay(r3)
fmt.Println("running on :3334")
http.ListenAndServe(":3334", router)
}
+17 -5
View File
@@ -39,9 +39,15 @@ type expirationManager struct {
events expiringEventHeap
mu sync.Mutex
// a function to query the relay database, generally the same as relay.queryStored
queryStored func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event]
// a function to delete an event from the relay database, generally the same as relay.DeleteEvent
deleteEvent func(ctx context.Context, id nostr.ID) error
// a function to call after an event has been deleted, generally the same as relay.OnEventDeleted
deleteCallback func(ctx context.Context, evt nostr.Event)
interval time.Duration
initialScanDone bool
kill chan struct{} // used for manually killing this
@@ -109,7 +115,11 @@ func (em *expirationManager) checkExpiredEvents(ctx context.Context) {
heap.Pop(&em.events)
ctx := context.WithValue(ctx, internalCallKey, struct{}{})
em.deleteEvent(ctx, next.id)
if nil == em.deleteEvent(ctx, next.id) && em.deleteCallback != nil {
for evt := range em.queryStored(ctx, nostr.Filter{IDs: []nostr.ID{next.id}}) {
em.deleteCallback(ctx, evt)
}
}
}
}
@@ -142,12 +152,14 @@ func (em *expirationManager) removeEvent(id nostr.ID) {
func (rl *Relay) StartExpirationManager(
queryStored func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event],
deleteEvent func(ctx context.Context, id nostr.ID) error,
onDeleteCallback func(ctx context.Context, evt nostr.Event),
) {
rl.expirationManager = &expirationManager{
events: make(expiringEventHeap, 0),
queryStored: queryStored,
deleteEvent: deleteEvent,
queryStored: queryStored,
deleteEvent: deleteEvent,
deleteCallback: onDeleteCallback,
interval: time.Hour,
kill: make(chan struct{}),
@@ -155,14 +167,14 @@ func (rl *Relay) StartExpirationManager(
}
go rl.expirationManager.start(rl.ctx)
rl.Info.AddSupportedNIP(40)
rl.Info.AddSupportedNIP("40")
}
func (rl *Relay) DisableExpirationManager() {
rl.expirationManager.stop()
rl.expirationManager = nil
idx := slices.Index(rl.Info.SupportedNIPs, 40)
idx := slices.Index(rl.Info.SupportedNIPs, "40")
if idx != -1 {
rl.Info.SupportedNIPs[idx] = rl.Info.SupportedNIPs[len(rl.Info.SupportedNIPs)-1]
rl.Info.SupportedNIPs = rl.Info.SupportedNIPs[0 : len(rl.Info.SupportedNIPs)-1]
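Hooking the new callback up looks roughly like this — `deleteByID` stands in for however the relay actually removes events from its database, and the query limit is arbitrary:

```go
package sketch

import (
	"context"
	"iter"
	"log"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
	"fiatjaf.com/nostr/khatru"
)

func startExpiration(rl *khatru.Relay, db eventstore.Store, deleteByID func(nostr.ID) error) {
	rl.StartExpirationManager(
		// queryStored: generally the relay's own stored-event query
		func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event] {
			return db.QueryEvents(filter, 500)
		},
		// deleteEvent: remove the expired event from the database
		func(ctx context.Context, id nostr.ID) error {
			return deleteByID(id)
		},
		// deleteCallback: fired after an expired event was deleted
		func(ctx context.Context, evt nostr.Event) {
			log.Printf("expired event %s was deleted", evt.ID)
		},
	)
}
```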
-65
@@ -1,65 +0,0 @@
package khatru
import (
"context"
"net"
"net/http"
"strconv"
"time"
"github.com/fasthttp/websocket"
"github.com/rs/cors"
)
func (rl *Relay) Router() *http.ServeMux {
return rl.serveMux
}
func (rl *Relay) SetRouter(mux *http.ServeMux) {
rl.serveMux = mux
}
// Start creates an http server and starts listening on given host and port.
func (rl *Relay) Start(host string, port int, started ...chan bool) error {
addr := net.JoinHostPort(host, strconv.Itoa(port))
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
rl.Addr = ln.Addr().String()
rl.httpServer = &http.Server{
Handler: cors.Default().Handler(rl),
Addr: addr,
WriteTimeout: 2 * time.Second,
ReadTimeout: 2 * time.Second,
IdleTimeout: 30 * time.Second,
}
// notify caller that we're starting
for _, started := range started {
close(started)
}
if err := rl.httpServer.Serve(ln); err == http.ErrServerClosed {
return nil
} else if err != nil {
return err
} else {
return nil
}
}
// Shutdown sends a websocket close control message to all connected clients.
func (rl *Relay) Shutdown(ctx context.Context) {
rl.httpServer.Shutdown(ctx)
rl.clientsMutex.Lock()
defer rl.clientsMutex.Unlock()
for ws := range rl.clients {
ws.conn.WriteControl(websocket.CloseMessage, nil, time.Now().Add(time.Second))
ws.cancel()
ws.conn.Close()
}
clear(rl.clients)
rl.listeners = rl.listeners[:0]
}
+1 -1
@@ -31,7 +31,7 @@ func New(rl *khatru.Relay, repositoryDir string) *GraspServer {
},
}
rl.Info.AddSupportedNIP(34)
rl.Info.AddSupportedNIP("34")
rl.Info.SupportedGrasps = append(rl.Info.SupportedGrasps, "GRASP-01")
base := rl.Router()
+32 -42
@@ -43,8 +43,8 @@ func (rl *Relay) ServeHTTP(w http.ResponseWriter, r *http.Request) {
})
relayPathMatches := true
if rl.ServiceURL != "" {
p, err := url.Parse(rl.ServiceURL)
if serviceURL := rl.getServiceURL(r); serviceURL != "" {
p, err := url.Parse(serviceURL)
if err == nil {
relayPathMatches = strings.TrimSuffix(r.URL.Path, "/") == strings.TrimSuffix(p.Path, "/")
}
@@ -108,17 +108,20 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
),
)
killOnce := sync.Once{}
kill := func() {
if nil != rl.OnDisconnect {
rl.OnDisconnect(ctx)
}
killOnce.Do(func() {
if nil != rl.OnDisconnect {
rl.OnDisconnect(ctx)
}
ticker.Stop()
cancel()
ws.cancel()
ws.conn.Close()
ticker.Stop()
cancel()
ws.cancel()
ws.conn.Close()
rl.removeClientAndListeners(ws)
rl.removeClientAndListeners(ws)
})
}
go func() {
@@ -214,35 +217,30 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
return
}
srl := rl
if rl.getSubRelayFromEvent != nil {
srl = rl.getSubRelayFromEvent(&env.Event)
}
var ok bool
var writeErr error
var skipBroadcast bool
if env.Event.Kind == nostr.KindDeletion {
// store the delete event first
skipBroadcast, writeErr = srl.handleNormal(ctx, env.Event)
skipBroadcast, writeErr = rl.handleNormal(ctx, env.Event)
if writeErr == nil {
// any error this returns is always prefixed with "blocked: "
writeErr = srl.handleDeleteRequest(ctx, env.Event)
writeErr = rl.handleDeleteRequest(ctx, env.Event)
}
} else if env.Event.Kind.IsEphemeral() {
// this will also always return a prefixed reason
writeErr = srl.handleEphemeral(ctx, env.Event)
writeErr = rl.handleEphemeral(ctx, env.Event)
} else {
// this will also always return a prefixed reason
skipBroadcast, writeErr = srl.handleNormal(ctx, env.Event)
skipBroadcast, writeErr = rl.handleNormal(ctx, env.Event)
}
var reason string
if writeErr == nil {
ok = true
if !skipBroadcast {
n := srl.notifyListeners(env.Event, false)
n := rl.notifyListeners(env.Event, false)
// the number of notified listeners matters in ephemeral events
if env.Event.Kind.IsEphemeral() {
@@ -275,15 +273,10 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
var total uint32
var hll *hyperloglog.HyperLogLog
srl := rl
if rl.getSubRelayFromFilter != nil {
srl = rl.getSubRelayFromFilter(env.Filter)
}
if offset := nip45.HyperLogLogEventPubkeyOffsetForFilter(env.Filter); offset != -1 {
total, hll = srl.handleCountRequestWithHLL(ctx, ws, env.Filter, offset)
total, hll = rl.handleCountRequestWithHLL(ctx, ws, env.Filter, offset)
} else {
total = srl.handleCountRequest(ctx, ws, env.Filter)
total = rl.handleCountRequest(ctx, ws, env.Filter)
}
resp := nostr.CountEnvelope{
@@ -297,6 +290,8 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
ws.WriteJSON(resp)
case *nostr.ReqEnvelope:
rl.removeListenerId(ws, env.SubscriptionID)
eose := sync.WaitGroup{}
eose.Add(len(env.Filters))
@@ -308,11 +303,7 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
// handle each filter separately -- dispatching events as they're loaded from databases
for _, filter := range env.Filters {
srl := rl
if rl.getSubRelayFromFilter != nil {
srl = rl.getSubRelayFromFilter(filter)
}
err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
err := rl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
if err != nil {
// fail everything if any filter is rejected
reason := err.Error()
@@ -322,8 +313,11 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
cancelReqCtx(errors.New("filter rejected"))
return
} else {
rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx)
} else if filter.IDs == nil {
// a query that is just a bunch of "ids": [...] will not add listeners.
// is this a bug? maybe, but I don't think anyone is listening for an ID
// that hasn't been published yet anywhere -- if yes we can change later
rl.addListener(ws, env.SubscriptionID, filter, cancelReqCtx)
}
}
@@ -360,15 +354,11 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "error: failed to authenticate: " + err.Error()})
}
case *nip77.OpenEnvelope:
srl := rl
if rl.getSubRelayFromFilter != nil {
srl = rl.getSubRelayFromFilter(env.Filter)
if !srl.Negentropy {
// ignore
return
}
if !rl.Negentropy {
// ignore
return
}
vec, err := srl.startNegentropySession(ctx, env.Filter)
vec, err := rl.startNegentropySession(ctx, env.Filter)
if err != nil {
// fail everything if any filter is rejected
reason := err.Error()
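One detail in this file deserves a note: the kill closure is now wrapped in a sync.Once, so the OnDisconnect hook, ticker stop, context cancellation and listener cleanup run exactly once even when several code paths race to tear the connection down. The pattern in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var killOnce sync.Once
	kill := func() {
		killOnce.Do(func() {
			fmt.Println("teardown runs exactly once")
		})
	}

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ { // several goroutines race to kill
		wg.Add(1)
		go func() {
			defer wg.Done()
			kill()
		}()
	}
	wg.Wait()
}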
+298 -73
@@ -3,18 +3,20 @@ package khatru
import (
"context"
"errors"
"slices"
"iter"
"sync"
"fiatjaf.com/lib/set"
"fiatjaf.com/nostr"
"github.com/puzpuzpuz/xsync/v3"
)
var ErrSubscriptionClosedByClient = errors.New("subscription closed by client")
type listenerSpec struct {
id string // kept here so we can easily match against it removeListenerId
cancel context.CancelCauseFunc
index int
subrelay *Relay // this is important when we're dealing with routing, otherwise it will be always the same
ssid int // internal numeric id for a listener
sid string // client-provided subscription id
cancel context.CancelCauseFunc
}
type listener struct {
@@ -23,10 +25,262 @@ type listener struct {
ws *WebSocket
}
type subscription struct {
id string
filter nostr.Filter
ws *WebSocket
}
type dispatcher struct {
serial int
subscriptions *xsync.MapOf[int, subscription]
byAuthor *xsync.MapOf[nostr.PubKey, set.Set[int]]
byKind *xsync.MapOf[nostr.Kind, set.Set[int]]
fallbackTags set.Set[int]
fallbackNothing set.Set[int]
}
var setPool = sync.Pool{
New: func() any {
return set.NewEmptySliceSetReusing[int](make([]int, 0, 10))
},
}
func newDispatcher() dispatcher {
return dispatcher{
subscriptions: xsync.NewMapOf[int, subscription](),
byAuthor: xsync.NewMapOf[nostr.PubKey, set.Set[int]](),
byKind: xsync.NewMapOf[nostr.Kind, set.Set[int]](),
fallbackTags: setPool.Get().(set.Set[int]),
fallbackNothing: setPool.Get().(set.Set[int]),
}
}
func (d *dispatcher) addSubscription(sub subscription) int {
d.serial++
ssid := d.serial
d.subscriptions.Store(ssid, sub)
indexed := false
if sub.filter.Authors != nil {
indexed = true
for _, author := range sub.filter.Authors {
d.byAuthor.Compute(author, func(s set.Set[int], loaded bool) (set.Set[int], bool) {
if !loaded {
s = setPool.Get().(set.Set[int])
}
s.Add(ssid)
return s, false
})
}
}
if sub.filter.Kinds != nil {
indexed = true
for _, kind := range sub.filter.Kinds {
d.byKind.Compute(kind, func(s set.Set[int], loaded bool) (set.Set[int], bool) {
if !loaded {
s = setPool.Get().(set.Set[int])
}
s.Add(ssid)
return s, false
})
}
}
if !indexed {
if sub.filter.Tags != nil {
d.fallbackTags.Add(ssid)
} else {
d.fallbackNothing.Add(ssid)
}
}
return ssid
}
func (d *dispatcher) removeSubscription(ssid int) nostr.Filter {
var filter nostr.Filter
d.subscriptions.Compute(ssid, func(sub subscription, loaded bool) (subscription, bool) {
indexed := false
filter = sub.filter
if sub.filter.Authors != nil {
indexed = true
for _, author := range sub.filter.Authors {
d.byAuthor.Compute(author, func(s set.Set[int], loaded bool) (set.Set[int], bool) {
if !loaded {
return s, true
}
s.Remove(ssid)
delete := s.Len() == 0
if delete {
setPool.Put(s)
}
return s, delete
})
}
}
if sub.filter.Kinds != nil {
indexed = true
for _, kind := range sub.filter.Kinds {
d.byKind.Compute(kind, func(s set.Set[int], loaded bool) (set.Set[int], bool) {
if !loaded {
return s, true
}
s.Remove(ssid)
delete := s.Len() == 0
if delete {
setPool.Put(s)
}
return s, delete
})
}
}
if !indexed {
if sub.filter.Tags != nil {
d.fallbackTags.Remove(ssid)
} else {
d.fallbackNothing.Remove(ssid)
}
}
return sub, true
})
return filter
}
func (d *dispatcher) candidates(event nostr.Event) iter.Seq[subscription] {
return func(yield func(subscription) bool) {
authorSubs, hasAuthorSubs := d.byAuthor.Load(event.PubKey)
kindSubs, hasKindSubs := d.byKind.Load(event.Kind)
if hasAuthorSubs && hasKindSubs {
for _, ssid := range authorSubs.Slice() {
sub, _ := d.subscriptions.Load(ssid)
if kindSubs.Has(ssid) || sub.filter.Kinds == nil {
if filterMatchesTimestampConstraintsAndTags(sub.filter, event) {
if !yield(sub) {
return
}
}
}
}
for _, ssid := range kindSubs.Slice() {
sub, _ := d.subscriptions.Load(ssid)
if sub.filter.Authors != nil {
continue
}
if filterMatchesTimestampConstraintsAndTags(sub.filter, event) {
if !yield(sub) {
return
}
}
}
} else if hasAuthorSubs {
for _, ssid := range authorSubs.Slice() {
sub, _ := d.subscriptions.Load(ssid)
if sub.filter.Kinds != nil {
// if there are any kinds in the filter we already know this doesn't qualify
continue
}
if filterMatchesTimestampConstraintsAndTags(sub.filter, event) {
if !yield(sub) {
return
}
}
}
} else if hasKindSubs {
for _, ssid := range kindSubs.Slice() {
sub, _ := d.subscriptions.Load(ssid)
if sub.filter.Authors != nil {
// if there are any authors in the filter we already know this doesn't qualify
continue
}
if filterMatchesTimestampConstraintsAndTags(sub.filter, event) {
if !yield(sub) {
return
}
}
}
}
if len(event.Tags) > 0 {
for _, ssid := range d.fallbackTags.Slice() {
sub, _ := d.subscriptions.Load(ssid)
if filterMatchesTimestampConstraintsAndTags(sub.filter, event) {
if !yield(sub) {
return
}
}
}
}
for _, ssid := range d.fallbackNothing.Slice() {
sub, _ := d.subscriptions.Load(ssid)
if filterMatchesTimestampConstraints(sub.filter, event) {
if !yield(sub) {
return
}
}
}
}
}
//go:inline
func filterMatchesTimestampConstraints(filter nostr.Filter, event nostr.Event) bool {
if filter.Since != 0 && event.CreatedAt < filter.Since {
return false
}
if filter.Until != 0 && event.CreatedAt > filter.Until {
return false
}
return true
}
//go:inline
func filterMatchesTimestampConstraintsAndTags(filter nostr.Filter, event nostr.Event) bool {
if !filterMatchesTimestampConstraints(filter, event) {
return false
}
for f, v := range filter.Tags {
if !event.Tags.ContainsAny(f, v) {
return false
}
}
return true
}
//go:inline
func tagKeyValueKey(tagKey, tagValue string) string {
return tagKey + "\x00" + tagValue
}
func (rl *Relay) GetListeningFilters() []nostr.Filter {
respfilters := make([]nostr.Filter, len(rl.listeners))
for i, l := range rl.listeners {
respfilters[i] = l.filter
respfilters := make([]nostr.Filter, 0, rl.dispatcher.subscriptions.Size())
for _, sub := range rl.dispatcher.subscriptions.Range {
respfilters = append(respfilters, sub.filter)
}
return respfilters
}
@@ -36,26 +290,31 @@ func (rl *Relay) GetListeningFilters() []nostr.Filter {
func (rl *Relay) addListener(
ws *WebSocket,
id string,
subrelay *Relay,
filter nostr.Filter,
cancel context.CancelCauseFunc,
) {
rl.clientsMutex.Lock()
defer rl.clientsMutex.Unlock()
select {
case <-rl.clientsMutex.C():
defer rl.clientsMutex.Unlock()
case <-ws.Context.Done():
return
}
if specs, ok := rl.clients[ws]; ok /* this will always be true unless client has disconnected very rapidly */ {
idx := len(subrelay.listeners)
rl.clients[ws] = append(specs, listenerSpec{
id: id,
cancel: cancel,
subrelay: subrelay,
index: idx,
})
subrelay.listeners = append(subrelay.listeners, listener{
ssid := rl.dispatcher.addSubscription(subscription{
ws: ws,
id: id,
filter: filter,
})
rl.clients[ws] = append(specs, listenerSpec{
ssid: ssid,
cancel: cancel,
sid: id,
})
if rl.OnListenerAdded != nil {
rl.OnListenerAdded(ws, ssid, id, filter)
}
}
}
@@ -66,35 +325,21 @@ func (rl *Relay) removeListenerId(ws *WebSocket, id string) {
defer rl.clientsMutex.Unlock()
if specs, ok := rl.clients[ws]; ok {
// swap delete specs that match this id
for s := len(specs) - 1; s >= 0; s-- {
spec := specs[s]
if spec.id == id {
kept := specs[:0]
for _, spec := range specs {
if spec.sid == id {
spec.cancel(ErrSubscriptionClosedByClient)
specs[s] = specs[len(specs)-1]
specs = specs[0 : len(specs)-1]
rl.clients[ws] = specs
filter := rl.dispatcher.removeSubscription(spec.ssid)
// swap delete listeners one at a time, as they may each be in a different subrelay
srl := spec.subrelay // == rl in normal cases, but different when this came from a route
if spec.index != len(srl.listeners)-1 {
movedFromIndex := len(srl.listeners) - 1
moved := srl.listeners[movedFromIndex] // this wasn't removed, but will be moved
srl.listeners[spec.index] = moved
// now we must update the listener we just moved
// so its .index reflects its new position on srl.listeners
movedSpecs := rl.clients[moved.ws]
idx := slices.IndexFunc(movedSpecs, func(ls listenerSpec) bool {
return ls.index == movedFromIndex && ls.subrelay == srl
})
movedSpecs[idx].index = spec.index
rl.clients[moved.ws] = movedSpecs
if rl.OnListenerRemoved != nil {
rl.OnListenerRemoved(ws, spec.ssid, id, filter)
}
srl.listeners = srl.listeners[0 : len(srl.listeners)-1] // finally reduce the slice length
continue
}
kept = append(kept, spec)
}
rl.clients[ws] = kept
}
}
@@ -102,31 +347,13 @@ func (rl *Relay) removeClientAndListeners(ws *WebSocket) {
rl.clientsMutex.Lock()
defer rl.clientsMutex.Unlock()
if specs, ok := rl.clients[ws]; ok {
// swap delete listeners and delete client (all specs will be deleted)
for s, spec := range specs {
for _, spec := range specs {
// no need to cancel contexts since they inherit from the main connection context
// just delete the listeners (swap-delete)
srl := spec.subrelay
filter := rl.dispatcher.removeSubscription(spec.ssid)
if spec.index != len(srl.listeners)-1 {
movedFromIndex := len(srl.listeners) - 1
moved := srl.listeners[movedFromIndex] // this wasn't removed, but will be moved
srl.listeners[spec.index] = moved
// temporarily update the spec of the listener being removed to have index == -1
// (since it was removed) so it doesn't match in the search below
rl.clients[ws][s].index = -1
// now we must update the listener we just moved
// so its .index reflects its new position on srl.listeners
movedSpecs := rl.clients[moved.ws]
idx := slices.IndexFunc(movedSpecs, func(ls listenerSpec) bool {
return ls.index == movedFromIndex && ls.subrelay == srl
})
movedSpecs[idx].index = spec.index
rl.clients[moved.ws] = movedSpecs
if rl.OnListenerRemoved != nil {
rl.OnListenerRemoved(ws, spec.ssid, spec.sid, filter)
}
srl.listeners = srl.listeners[0 : len(srl.listeners)-1] // finally reduce the slice length
}
}
delete(rl.clients, ws)
@@ -136,16 +363,14 @@ func (rl *Relay) removeClientAndListeners(ws *WebSocket) {
func (rl *Relay) notifyListeners(event nostr.Event, skipPrevent bool) int {
count := 0
listenersloop:
for _, listener := range rl.listeners {
if listener.filter.Matches(event) {
if !skipPrevent && nil != rl.PreventBroadcast {
if rl.PreventBroadcast(listener.ws, listener.filter, event) {
continue listenersloop
}
for sub := range rl.dispatcher.candidates(event) {
if !skipPrevent && nil != rl.PreventBroadcast {
if rl.PreventBroadcast(sub.ws, sub.filter, event) {
continue listenersloop
}
listener.ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &listener.id, Event: event})
count++
}
sub.ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &sub.id, Event: event})
count++
}
return count
}
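The dispatcher above replaces the flat listeners slice with an inverted index: every subscription gets a numeric ssid, sets of ssids are kept per author and per kind (recycled through a sync.Pool), and two fallback sets catch filters that index on neither. notifyListeners then only walks the candidate union instead of scanning every listener. A simplified, runnable sketch of the same idea using plain maps; like the real candidates, it over-approximates, and a full filter match would still run on each candidate:

package main

import "fmt"

type filter struct {
	kinds   []int
	authors []string
}

type dispatcher struct {
	serial   int
	subs     map[int]filter
	byKind   map[int]map[int]struct{}
	byAuthor map[string]map[int]struct{}
	fallback map[int]struct{} // filters with neither kinds nor authors
}

func (d *dispatcher) add(f filter) int {
	d.serial++
	ssid := d.serial
	d.subs[ssid] = f
	indexed := false
	for _, k := range f.kinds {
		if d.byKind[k] == nil {
			d.byKind[k] = map[int]struct{}{}
		}
		d.byKind[k][ssid] = struct{}{}
		indexed = true
	}
	for _, a := range f.authors {
		if d.byAuthor[a] == nil {
			d.byAuthor[a] = map[int]struct{}{}
		}
		d.byAuthor[a][ssid] = struct{}{}
		indexed = true
	}
	if !indexed {
		d.fallback[ssid] = struct{}{}
	}
	return ssid
}

// candidates collects every ssid that might match an event with the
// given kind and author; false positives are fine, false negatives are not.
func (d *dispatcher) candidates(kind int, author string) map[int]struct{} {
	out := map[int]struct{}{}
	for ssid := range d.byKind[kind] {
		out[ssid] = struct{}{}
	}
	for ssid := range d.byAuthor[author] {
		out[ssid] = struct{}{}
	}
	for ssid := range d.fallback {
		out[ssid] = struct{}{}
	}
	return out
}

func main() {
	d := &dispatcher{
		subs:     map[int]filter{},
		byKind:   map[int]map[int]struct{}{},
		byAuthor: map[string]map[int]struct{}{},
		fallback: map[int]struct{}{},
	}
	d.add(filter{kinds: []int{1}})            // ssid 1
	d.add(filter{authors: []string{"alice"}}) // ssid 2
	d.add(filter{})                           // ssid 3, matches anything
	fmt.Println(d.candidates(1, "bob")) // includes 1 and 3, not 2
}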
+34 -33
@@ -1,7 +1,6 @@
package khatru
import (
"math/rand"
"testing"
"fiatjaf.com/nostr"
@@ -25,7 +24,7 @@ func FuzzRandomListenerClientRemoving(f *testing.F) {
l := 0
for i := 0; i < totalWebsockets; i++ {
ws := &WebSocket{}
ws := &WebSocket{Context: rl.ctx}
websockets = append(websockets, ws)
rl.clients[ws] = nil
}
@@ -38,7 +37,7 @@ func FuzzRandomListenerClientRemoving(f *testing.F) {
if s%addListenerFreq == 0 {
l++
rl.addListener(ws, w+":"+idFromSeqLower(j), rl, f, cancel)
rl.addListener(ws, w+":"+idFromSeqLower(j), f, cancel)
}
s++
@@ -46,14 +45,22 @@ func FuzzRandomListenerClientRemoving(f *testing.F) {
}
require.Len(t, rl.clients, totalWebsockets)
require.Len(t, rl.listeners, l)
ssidCount := 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, l, ssidCount)
for ws := range rl.clients {
rl.removeClientAndListeners(ws)
}
require.Len(t, rl.clients, 0)
require.Len(t, rl.listeners, 0)
ssidCount = 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, 0, ssidCount)
})
}
@@ -84,7 +91,7 @@ func FuzzRandomListenerIdRemoving(f *testing.F) {
extra := 0
for i := 0; i < totalWebsockets; i++ {
ws := &WebSocket{}
ws := &WebSocket{Context: rl.ctx}
websockets = append(websockets, ws)
rl.clients[ws] = nil
}
@@ -97,11 +104,11 @@ func FuzzRandomListenerIdRemoving(f *testing.F) {
if s%addListenerFreq == 0 {
id := w + ":" + idFromSeqLower(j)
rl.addListener(ws, id, rl, f, cancel)
rl.addListener(ws, id, f, cancel)
subs = append(subs, wsid{ws, id})
if s%addExtraListenerFreq == 0 {
rl.addListener(ws, id, rl, f, cancel)
rl.addListener(ws, id, f, cancel)
extra++
}
}
@@ -111,16 +118,21 @@ func FuzzRandomListenerIdRemoving(f *testing.F) {
}
require.Len(t, rl.clients, totalWebsockets)
require.Len(t, rl.listeners, len(subs)+extra)
ssidCount := 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, len(subs)+extra, ssidCount)
rand.Shuffle(len(subs), func(i, j int) {
subs[i], subs[j] = subs[j], subs[i]
})
for _, wsidToRemove := range subs {
for _, wsidToRemove := range moduloOrder(subs, int(utw+ubs+ualf+ualef)) {
rl.removeListenerId(wsidToRemove.ws, wsidToRemove.id)
}
require.Len(t, rl.listeners, 0)
ssidCount = 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, 0, ssidCount)
require.Len(t, rl.clients, totalWebsockets)
for _, specs := range rl.clients {
require.Len(t, specs, 0)
@@ -129,23 +141,17 @@ func FuzzRandomListenerIdRemoving(f *testing.F) {
}
func FuzzRouterListenersPabloCrash(f *testing.F) {
f.Add(uint(3), uint(6), uint(2), uint(20))
f.Fuzz(func(t *testing.T, totalRelays uint, totalConns uint, subFreq uint, subIterations uint) {
totalRelays++
f.Add(uint(6), uint(2), uint(20))
f.Fuzz(func(t *testing.T, totalConns uint, subFreq uint, subIterations uint) {
totalConns++
subFreq++
subIterations++
rl := NewRelay()
relays := make([]*Relay, int(totalRelays))
for i := 0; i < int(totalRelays); i++ {
relays[i] = NewRelay()
}
conns := make([]*WebSocket, int(totalConns))
for i := 0; i < int(totalConns); i++ {
ws := &WebSocket{}
ws := &WebSocket{Context: rl.ctx}
conns[i] = ws
rl.clients[ws] = make([]listenerSpec, 0, subIterations)
}
@@ -159,18 +165,16 @@ func FuzzRouterListenersPabloCrash(f *testing.F) {
}
s := 0
subs := make([]wsid, 0, subIterations*totalConns*totalRelays)
subs := make([]wsid, 0, subIterations*totalConns)
for i, conn := range conns {
w := idFromSeqUpper(i)
for j := 0; j < int(subIterations); j++ {
id := w + ":" + idFromSeqLower(j)
for _, rlt := range relays {
if s%int(subFreq) == 0 {
rl.addListener(conn, id, rlt, f, cancel)
subs = append(subs, wsid{conn, id})
}
s++
if s%int(subFreq) == 0 {
rl.addListener(conn, id, f, cancel)
subs = append(subs, wsid{conn, id})
}
s++
}
}
@@ -181,8 +185,5 @@ func FuzzRouterListenersPabloCrash(f *testing.F) {
for _, wsid := range subs {
require.Len(t, rl.clients[wsid.ws], 0)
}
for _, rlt := range relays {
require.Len(t, rlt.listeners, 0)
}
})
}
+133 -233
@@ -1,7 +1,6 @@
package khatru
import (
"math/rand"
"strings"
"testing"
@@ -23,11 +22,23 @@ func idFromSeq(seq int, min, max int) string {
return result.String()
}
func moduloOrder[T any](items []T, seed int) []T {
remaining := append([]T(nil), items...)
ordered := make([]T, 0, len(items))
for len(remaining) > 0 {
idx := seed % len(remaining)
ordered = append(ordered, remaining[idx])
remaining = append(remaining[:idx], remaining[idx+1:]...)
seed++
}
return ordered
}
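moduloOrder replaces the rand.Shuffle calls these tests used before: it derives a deterministic but scrambled visiting order from a seed, keeping fuzz runs reproducible. A hand-checked example using the definition above:

fmt.Println(moduloOrder([]string{"a", "b", "c", "d"}, 2))
// 2%4=2 -> "c"; 3%3=0 -> "a"; 4%2=0 -> "b"; 5%1=0 -> "d"
// prints: [c a b d]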
func TestListenerSetupAndRemoveOnce(t *testing.T) {
rl := NewRelay()
ws1 := &WebSocket{}
ws2 := &WebSocket{}
ws1 := &WebSocket{Context: rl.ctx}
ws2 := &WebSocket{Context: rl.ctx}
f1 := nostr.Filter{Kinds: []nostr.Kind{1}}
f2 := nostr.Filter{Kinds: []nostr.Kind{2}}
@@ -39,28 +50,21 @@ func TestListenerSetupAndRemoveOnce(t *testing.T) {
var cancel func(cause error) = nil
t.Run("adding listeners", func(t *testing.T) {
rl.addListener(ws1, "1a", rl, f1, cancel)
rl.addListener(ws1, "1b", rl, f2, cancel)
rl.addListener(ws2, "2a", rl, f3, cancel)
rl.addListener(ws1, "1c", rl, f3, cancel)
rl.addListener(ws1, "1a", f1, cancel)
rl.addListener(ws1, "1b", f2, cancel)
rl.addListener(ws2, "2a", f3, cancel)
rl.addListener(ws1, "1c", f3, cancel)
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"1a", cancel, 0, rl},
{"1b", cancel, 1, rl},
{"1c", cancel, 3, rl},
{1, "1a", cancel},
{2, "1b", cancel},
{4, "1c", cancel},
},
ws2: {
{"2a", cancel, 2, rl},
{3, "2a", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"1a", f1, ws1},
{"1b", f2, ws1},
{"2a", f3, ws2},
{"1c", f3, ws1},
}, rl.listeners)
})
t.Run("removing a client", func(t *testing.T) {
@@ -68,23 +72,19 @@ func TestListenerSetupAndRemoveOnce(t *testing.T) {
require.Equal(t, map[*WebSocket][]listenerSpec{
ws2: {
{"2a", cancel, 0, rl},
{3, "2a", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"2a", f3, ws2},
}, rl.listeners)
})
}
func TestListenerMoreConvolutedCase(t *testing.T) {
rl := NewRelay()
ws1 := &WebSocket{}
ws2 := &WebSocket{}
ws3 := &WebSocket{}
ws4 := &WebSocket{}
ws1 := &WebSocket{Context: rl.ctx}
ws2 := &WebSocket{Context: rl.ctx}
ws3 := &WebSocket{Context: rl.ctx}
ws4 := &WebSocket{Context: rl.ctx}
f1 := nostr.Filter{Kinds: []nostr.Kind{1}}
f2 := nostr.Filter{Kinds: []nostr.Kind{2}}
@@ -98,35 +98,27 @@ func TestListenerMoreConvolutedCase(t *testing.T) {
var cancel func(cause error) = nil
t.Run("adding listeners", func(t *testing.T) {
rl.addListener(ws1, "c", rl, f1, cancel)
rl.addListener(ws2, "b", rl, f2, cancel)
rl.addListener(ws3, "a", rl, f3, cancel)
rl.addListener(ws4, "d", rl, f3, cancel)
rl.addListener(ws2, "b", rl, f1, cancel)
rl.addListener(ws1, "c", f1, cancel)
rl.addListener(ws2, "b", f2, cancel)
rl.addListener(ws3, "a", f3, cancel)
rl.addListener(ws4, "d", f3, cancel)
rl.addListener(ws2, "b", f1, cancel)
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 0, rl},
{1, "c", cancel},
},
ws2: {
{"b", cancel, 1, rl},
{"b", cancel, 4, rl},
{2, "b", cancel},
{5, "b", cancel},
},
ws3: {
{"a", cancel, 2, rl},
{3, "a", cancel},
},
ws4: {
{"d", cancel, 3, rl},
{4, "d", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"c", f1, ws1},
{"b", f2, ws2},
{"a", f3, ws3},
{"d", f3, ws4},
{"b", f1, ws2},
}, rl.listeners)
})
t.Run("removing a client", func(t *testing.T) {
@@ -134,85 +126,62 @@ func TestListenerMoreConvolutedCase(t *testing.T) {
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 0, rl},
{1, "c", cancel},
},
ws3: {
{"a", cancel, 2, rl},
{3, "a", cancel},
},
ws4: {
{"d", cancel, 1, rl},
{4, "d", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"c", f1, ws1},
{"d", f3, ws4},
{"a", f3, ws3},
}, rl.listeners)
})
t.Run("reorganize the first case differently and then remove again", func(t *testing.T) {
rl.clients = map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 1, rl},
{2, "c", cancel},
},
ws2: {
{"b", cancel, 2, rl},
{"b", cancel, 4, rl},
{3, "b", cancel},
{5, "b", cancel},
},
ws3: {
{"a", cancel, 0, rl},
{1, "a", cancel},
},
ws4: {
{"d", cancel, 3, rl},
{4, "d", cancel},
},
}
rl.listeners = []listener{
{"a", f3, ws3},
{"c", f1, ws1},
{"b", f2, ws2},
{"d", f3, ws4},
{"b", f1, ws2},
}
rl.removeClientAndListeners(ws2)
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 1, rl},
{2, "c", cancel},
},
ws3: {
{"a", cancel, 0, rl},
{1, "a", cancel},
},
ws4: {
{"d", cancel, 2, rl},
{4, "d", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"a", f3, ws3},
{"c", f1, ws1},
{"d", f3, ws4},
}, rl.listeners)
})
}
func TestListenerMoreStuffWithMultipleRelays(t *testing.T) {
rl := NewRelay()
ws1 := &WebSocket{}
ws2 := &WebSocket{}
ws3 := &WebSocket{}
ws4 := &WebSocket{}
ws1 := &WebSocket{Context: rl.ctx}
ws2 := &WebSocket{Context: rl.ctx}
ws3 := &WebSocket{Context: rl.ctx}
ws4 := &WebSocket{Context: rl.ctx}
f1 := nostr.Filter{Kinds: []nostr.Kind{1}}
f2 := nostr.Filter{Kinds: []nostr.Kind{2}}
f3 := nostr.Filter{Kinds: []nostr.Kind{3}}
rlx := NewRelay()
rly := NewRelay()
rlz := NewRelay()
rl.clients[ws1] = nil
rl.clients[ws2] = nil
rl.clients[ws3] = nil
@@ -221,56 +190,37 @@ func TestListenerMoreStuffWithMultipleRelays(t *testing.T) {
var cancel func(cause error) = nil
t.Run("adding listeners", func(t *testing.T) {
rl.addListener(ws1, "c", rlx, f1, cancel)
rl.addListener(ws2, "b", rly, f2, cancel)
rl.addListener(ws3, "a", rlz, f3, cancel)
rl.addListener(ws4, "d", rlx, f3, cancel)
rl.addListener(ws4, "e", rlx, f3, cancel)
rl.addListener(ws3, "a", rlx, f3, cancel)
rl.addListener(ws4, "e", rly, f3, cancel)
rl.addListener(ws3, "f", rly, f3, cancel)
rl.addListener(ws1, "g", rlz, f1, cancel)
rl.addListener(ws2, "g", rlz, f2, cancel)
rl.addListener(ws1, "c", f1, cancel)
rl.addListener(ws2, "b", f2, cancel)
rl.addListener(ws3, "a", f3, cancel)
rl.addListener(ws4, "d", f3, cancel)
rl.addListener(ws4, "e", f3, cancel)
rl.addListener(ws3, "a", f3, cancel)
rl.addListener(ws4, "e", f3, cancel)
rl.addListener(ws3, "f", f3, cancel)
rl.addListener(ws1, "g", f1, cancel)
rl.addListener(ws2, "g", f2, cancel)
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 0, rlx},
{"g", cancel, 1, rlz},
{1, "c", cancel},
{9, "g", cancel},
},
ws2: {
{"b", cancel, 0, rly},
{"g", cancel, 2, rlz},
{2, "b", cancel},
{10, "g", cancel},
},
ws3: {
{"a", cancel, 0, rlz},
{"a", cancel, 3, rlx},
{"f", cancel, 2, rly},
{3, "a", cancel},
{6, "a", cancel},
{8, "f", cancel},
},
ws4: {
{"d", cancel, 1, rlx},
{"e", cancel, 2, rlx},
{"e", cancel, 1, rly},
{4, "d", cancel},
{5, "e", cancel},
{7, "e", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"c", f1, ws1},
{"d", f3, ws4},
{"e", f3, ws4},
{"a", f3, ws3},
}, rlx.listeners)
require.Equal(t, []listener{
{"b", f2, ws2},
{"e", f3, ws4},
{"f", f3, ws3},
}, rly.listeners)
require.Equal(t, []listener{
{"a", f3, ws3},
{"g", f1, ws1},
{"g", f2, ws2},
}, rlz.listeners)
})
t.Run("removing a subscription id", func(t *testing.T) {
@@ -280,41 +230,23 @@ func TestListenerMoreStuffWithMultipleRelays(t *testing.T) {
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 0, rlx},
{"g", cancel, 1, rlz},
{1, "c", cancel},
{9, "g", cancel},
},
ws2: {
{"b", cancel, 0, rly},
{"g", cancel, 2, rlz},
{2, "b", cancel},
{10, "g", cancel},
},
ws3: {
{"a", cancel, 0, rlz},
{"a", cancel, 1, rlx},
{"f", cancel, 2, rly},
{3, "a", cancel},
{6, "a", cancel},
{8, "f", cancel},
},
ws4: {
{"e", cancel, 1, rly},
{"e", cancel, 2, rlx},
{5, "e", cancel},
{7, "e", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"c", f1, ws1},
{"a", f3, ws3},
{"e", f3, ws4},
}, rlx.listeners)
require.Equal(t, []listener{
{"b", f2, ws2},
{"e", f3, ws4},
{"f", f3, ws3},
}, rly.listeners)
require.Equal(t, []listener{
{"a", f3, ws3},
{"g", f1, ws1},
{"g", f2, ws2},
}, rlz.listeners)
})
t.Run("removing another subscription id", func(t *testing.T) {
@@ -325,37 +257,21 @@ func TestListenerMoreStuffWithMultipleRelays(t *testing.T) {
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 0, rlx},
{"g", cancel, 1, rlz},
{1, "c", cancel},
{9, "g", cancel},
},
ws2: {
{"b", cancel, 0, rly},
{"g", cancel, 0, rlz},
{2, "b", cancel},
{10, "g", cancel},
},
ws3: {
{"f", cancel, 2, rly},
{8, "f", cancel},
},
ws4: {
{"e", cancel, 1, rly},
{"e", cancel, 1, rlx},
{5, "e", cancel},
{7, "e", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"c", f1, ws1},
{"e", f3, ws4},
}, rlx.listeners)
require.Equal(t, []listener{
{"b", f2, ws2},
{"e", f3, ws4},
{"f", f3, ws3},
}, rly.listeners)
require.Equal(t, []listener{
{"g", f2, ws2},
{"g", f1, ws1},
}, rlz.listeners)
})
t.Run("removing a connection", func(t *testing.T) {
@@ -363,31 +279,17 @@ func TestListenerMoreStuffWithMultipleRelays(t *testing.T) {
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 0, rlx},
{"g", cancel, 0, rlz},
{1, "c", cancel},
{9, "g", cancel},
},
ws3: {
{"f", cancel, 0, rly},
{8, "f", cancel},
},
ws4: {
{"e", cancel, 1, rly},
{"e", cancel, 1, rlx},
{5, "e", cancel},
{7, "e", cancel},
},
}, rl.clients)
require.Equal(t, []listener{
{"c", f1, ws1},
{"e", f3, ws4},
}, rlx.listeners)
require.Equal(t, []listener{
{"f", f3, ws3},
{"e", f3, ws4},
}, rly.listeners)
require.Equal(t, []listener{
{"g", f1, ws1},
}, rlz.listeners)
})
t.Run("removing another subscription id", func(t *testing.T) {
@@ -398,26 +300,14 @@ func TestListenerMoreStuffWithMultipleRelays(t *testing.T) {
require.Equal(t, map[*WebSocket][]listenerSpec{
ws1: {
{"c", cancel, 0, rlx},
{"g", cancel, 0, rlz},
{1, "c", cancel},
{9, "g", cancel},
},
ws3: {
{"f", cancel, 0, rly},
{8, "f", cancel},
},
ws4: {},
}, rl.clients)
require.Equal(t, []listener{
{"c", f1, ws1},
}, rlx.listeners)
require.Equal(t, []listener{
{"f", f3, ws3},
}, rly.listeners)
require.Equal(t, []listener{
{"g", f1, ws1},
}, rlz.listeners)
})
}
@@ -432,7 +322,7 @@ func TestRandomListenerClientRemoving(t *testing.T) {
l := 0
for i := 0; i < 20; i++ {
ws := &WebSocket{}
ws := &WebSocket{Context: rl.ctx}
websockets = append(websockets, ws)
rl.clients[ws] = nil
}
@@ -442,22 +332,30 @@ func TestRandomListenerClientRemoving(t *testing.T) {
ws := websockets[i]
w := idFromSeqUpper(i)
if rand.Intn(2) < 1 {
if (i+j)%2 == 0 {
l++
rl.addListener(ws, w+":"+idFromSeqLower(j), rl, f, cancel)
rl.addListener(ws, w+":"+idFromSeqLower(j), f, cancel)
}
}
}
require.Len(t, rl.clients, 20)
require.Len(t, rl.listeners, l)
ssidCount := 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, l, ssidCount)
for ws := range rl.clients {
rl.removeClientAndListeners(ws)
}
require.Len(t, rl.clients, 0)
require.Len(t, rl.listeners, 0)
ssidCount = 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, 0, ssidCount)
}
func TestRandomListenerIdRemoving(t *testing.T) {
@@ -477,7 +375,7 @@ func TestRandomListenerIdRemoving(t *testing.T) {
extra := 0
for i := 0; i < 20; i++ {
ws := &WebSocket{}
ws := &WebSocket{Context: rl.ctx}
websockets = append(websockets, ws)
rl.clients[ws] = nil
}
@@ -487,13 +385,13 @@ func TestRandomListenerIdRemoving(t *testing.T) {
ws := websockets[i]
w := idFromSeqUpper(i)
if rand.Intn(2) < 1 {
if (i+j)%2 == 0 {
id := w + ":" + idFromSeqLower(j)
rl.addListener(ws, id, rl, f, cancel)
rl.addListener(ws, id, f, cancel)
subs = append(subs, wsid{ws, id})
if rand.Intn(5) < 1 {
rl.addListener(ws, id, rl, f, cancel)
if (i+j)%5 == 0 {
rl.addListener(ws, id, f, cancel)
extra++
}
}
@@ -501,16 +399,21 @@ func TestRandomListenerIdRemoving(t *testing.T) {
}
require.Len(t, rl.clients, 20)
require.Len(t, rl.listeners, len(subs)+extra)
ssidCount := 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, len(subs)+extra, ssidCount)
rand.Shuffle(len(subs), func(i, j int) {
subs[i], subs[j] = subs[j], subs[i]
})
for _, wsidToRemove := range subs {
for _, wsidToRemove := range moduloOrder(subs, 20) {
rl.removeListenerId(wsidToRemove.ws, wsidToRemove.id)
}
require.Len(t, rl.listeners, 0)
ssidCount = 0
for _, specs := range rl.clients {
ssidCount += len(specs)
}
require.Equal(t, 0, ssidCount)
require.Len(t, rl.clients, 20)
for _, specs := range rl.clients {
require.Len(t, specs, 0)
@@ -520,12 +423,9 @@ func TestRandomListenerIdRemoving(t *testing.T) {
func TestRouterListenersPabloCrash(t *testing.T) {
rl := NewRelay()
rla := NewRelay()
rlb := NewRelay()
ws1 := &WebSocket{}
ws2 := &WebSocket{}
ws3 := &WebSocket{}
ws1 := &WebSocket{Context: rl.ctx}
ws2 := &WebSocket{Context: rl.ctx}
ws3 := &WebSocket{Context: rl.ctx}
rl.clients[ws1] = nil
rl.clients[ws2] = nil
@@ -534,11 +434,11 @@ func TestRouterListenersPabloCrash(t *testing.T) {
f := nostr.Filter{Kinds: []nostr.Kind{1}}
cancel := func(cause error) {}
rl.addListener(ws1, ":1", rla, f, cancel)
rl.addListener(ws2, ":1", rlb, f, cancel)
rl.addListener(ws3, "a", rlb, f, cancel)
rl.addListener(ws3, "b", rla, f, cancel)
rl.addListener(ws3, "c", rlb, f, cancel)
rl.addListener(ws1, ":1", f, cancel)
rl.addListener(ws2, ":1", f, cancel)
rl.addListener(ws3, "a", f, cancel)
rl.addListener(ws3, "b", f, cancel)
rl.addListener(ws3, "c", f, cancel)
rl.removeClientAndListeners(ws1)
rl.removeClientAndListeners(ws3)
+3 -3
@@ -12,13 +12,13 @@ func (rl *Relay) HandleNIP11(w http.ResponseWriter, r *http.Request) {
info := *rl.Info
if nil != rl.DeleteEvent {
info.AddSupportedNIP(9)
info.AddSupportedNIP("9")
}
if nil != rl.Count {
info.AddSupportedNIP(45)
info.AddSupportedNIP("45")
}
if rl.Negentropy {
info.AddSupportedNIP(77)
info.AddSupportedNIP("77")
}
// resolve relative icon and banner URLs against base URL
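With these branches the advertised NIP list tracks the configured capabilities instead of being declared statically: a DeleteEvent handler advertises NIP-09, a Count handler NIP-45, and Negentropy support NIP-77. A minimal sketch:

rl := khatru.NewRelay()
rl.DeleteEvent = func(ctx context.Context, id nostr.ID) error { return nil }
rl.Negentropy = true
// a NIP-11 request against this relay now lists "9" and "77"
// alongside the defaults "1", "11", "42", "70", "86"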
+18
@@ -21,8 +21,10 @@ type RelayManagementAPI struct {
BanPubKey func(ctx context.Context, pubkey nostr.PubKey, reason string) error
ListBannedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error)
UnbanPubKey func(ctx context.Context, pubkey nostr.PubKey, reason string) error
AllowPubKey func(ctx context.Context, pubkey nostr.PubKey, reason string) error
ListAllowedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error)
UnallowPubKey func(ctx context.Context, pubkey nostr.PubKey, reason string) error
ListEventsNeedingModeration func(ctx context.Context) ([]nip86.IDReason, error)
AllowEvent func(ctx context.Context, id nostr.ID, reason string) error
BanEvent func(ctx context.Context, id nostr.ID, reason string) error
@@ -168,6 +170,14 @@ func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) {
} else {
resp.Result = result
}
case nip86.UnbanPubKey:
if rl.ManagementAPI.UnbanPubKey == nil {
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
} else if err := rl.ManagementAPI.UnbanPubKey(ctx, thing.PubKey, thing.Reason); err != nil {
resp.Error = err.Error()
} else {
resp.Result = true
}
case nip86.AllowPubKey:
if rl.ManagementAPI.AllowPubKey == nil {
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
@@ -184,6 +194,14 @@ func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) {
} else {
resp.Result = result
}
case nip86.UnallowPubKey:
if rl.ManagementAPI.UnallowPubKey == nil {
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
} else if err := rl.ManagementAPI.UnallowPubKey(ctx, thing.PubKey, thing.Reason); err != nil {
resp.Error = err.Error()
} else {
resp.Result = true
}
case nip86.BanEvent:
if rl.ManagementAPI.BanEvent == nil {
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
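Like the rest of the management API, the two new methods are opt-in: each is only dispatched when its handler field is set. A sketch of wiring them, where banlist and allowlist are hypothetical application-level stores:

rl.ManagementAPI.UnbanPubKey = func(ctx context.Context, pubkey nostr.PubKey, reason string) error {
	return banlist.Remove(pubkey) // hypothetical store
}
rl.ManagementAPI.UnallowPubKey = func(ctx context.Context, pubkey nostr.PubKey, reason string) error {
	return allowlist.Remove(pubkey) // hypothetical store
}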
+59
@@ -3,6 +3,7 @@ package policies
import (
"context"
"fmt"
"iter"
"regexp"
"slices"
"strings"
@@ -110,6 +111,9 @@ func RejectEventsWithBase64Media(ctx context.Context, evt nostr.Event) (bool, st
}
func OnlyAllowNIP70ProtectedEvents(ctx context.Context, event nostr.Event) (reject bool, msg string) {
if event.Kind == 5 {
return false, ""
}
if nip70.IsProtected(event) {
return false, ""
}
@@ -120,6 +124,9 @@ var nostrReferencesPrefix = regexp.MustCompile(`\b(nevent1|npub1|nprofile1|note1
func RejectUnprefixedNostrReferences(ctx context.Context, event nostr.Event) (bool, string) {
content := sdk.GetMainContent(event)
if content == "" {
content = event.Content
}
// only do it for stuff that wasn't parsed as blocks already
// (since those are already good references or URLs)
@@ -144,3 +151,55 @@ func RejectUnprefixedNostrReferences(ctx context.Context, event nostr.Event) (bo
return false, ""
}
// PreventNormalDuplicates prevents normal events that refer to the same thing from being saved.
// For kinds 6, 7, 16, 1018 it checks "e" tags.
// For kind 1163 it checks "p" tags.
// For kinds 6, 7, 16, 1163, 7516, 7517 it checks "a" tags.
func PreventNormalDuplicates(query func(nostr.Filter, int) iter.Seq[nostr.Event]) func(ctx context.Context, event nostr.Event) (bool, string) {
exists := func(event nostr.Event, tagName string) bool {
hasAll := true
for t := range event.Tags.FindAll(tagName) {
hasThis := false
for range query(nostr.Filter{
Authors: []nostr.PubKey{event.PubKey},
Kinds: []nostr.Kind{event.Kind},
Tags: nostr.TagMap{tagName: []string{t[1]}},
}, 1) {
hasThis = true
}
if !hasThis {
hasAll = false
break
}
}
return hasAll
}
return func(ctx context.Context, event nostr.Event) (bool, string) {
reject := false
switch event.Kind {
case 6:
reject = exists(event, "e") && exists(event, "a")
case 7:
reject = exists(event, "e") && exists(event, "a")
case 16:
reject = exists(event, "e") && exists(event, "a")
case 1018:
reject = exists(event, "e")
case 1163:
reject = exists(event, "p") && exists(event, "a")
case 7516:
reject = exists(event, "a")
case 7517:
reject = exists(event, "a")
}
if reject {
return true, "an event similar to this already exists"
}
return false, ""
}
}
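The returned function has the usual (ctx, event) (reject, msg) policy shape. A sketch of constructing it, assuming the relay's stored-query function is adapted to the (filter, limit) signature the policy expects; treating Limit as a field on nostr.Filter is an assumption here:

queryWithLimit := func(f nostr.Filter, limit int) iter.Seq[nostr.Event] {
	f.Limit = limit // assumed field; caps the duplicate lookup at one event
	return rl.QueryStored(context.Background(), f)
}
rejectDuplicates := policies.PreventNormalDuplicates(queryWithLimit)
// register rejectDuplicates wherever the relay's event-rejection
// policies are attached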
+145 -15
@@ -2,15 +2,18 @@ package khatru
import (
"context"
"encoding/base64"
"encoding/binary"
"iter"
"log"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"unsafe"
"fiatjaf.com/lib/channelmutex"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/nip11"
@@ -30,7 +33,7 @@ func NewRelay() *Relay {
Info: &nip11.RelayInformationDocument{
Software: "https://pkg.go.dev/fiatjaf.com/nostr/khatru",
Version: "n/a",
SupportedNIPs: []any{1, 11, 42, 70, 86},
SupportedNIPs: []string{"1", "11", "42", "70", "86"},
},
upgrader: websocket.Upgrader{
@@ -39,8 +42,10 @@ func NewRelay() *Relay {
CheckOrigin: func(r *http.Request) bool { return true },
},
clients: make(map[*WebSocket][]listenerSpec, 100),
listeners: make([]listener, 0, 100),
clients: make(map[*WebSocket][]listenerSpec, 100),
clientsMutex: channelmutex.New(),
dispatcher: newDispatcher(),
serveMux: &http.ServeMux{},
@@ -69,6 +74,7 @@ type Relay struct {
ReplaceEvent func(ctx context.Context, event nostr.Event) error
DeleteEvent func(ctx context.Context, id nostr.ID) error
OnEventSaved func(ctx context.Context, event nostr.Event)
OnEventDeleted func(ctx context.Context, deleted nostr.Event)
OnEphemeralEvent func(ctx context.Context, event nostr.Event)
OnRequest func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
OnCount func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
@@ -78,17 +84,14 @@ type Relay struct {
RejectConnection func(r *http.Request) bool
OnConnect func(ctx context.Context)
OnDisconnect func(ctx context.Context)
OnListenerAdded func(ws *WebSocket, ssid int, id string, filter nostr.Filter)
OnListenerRemoved func(ws *WebSocket, ssid int, id string, filter nostr.Filter)
OverwriteRelayInformation func(ctx context.Context, r *http.Request, info nip11.RelayInformationDocument) nip11.RelayInformationDocument
PreventBroadcast func(ws *WebSocket, filter nostr.Filter, event nostr.Event) bool
// this can be ignored unless you know what you're doing
ChallengePrefix string
// these are used when this relays acts as a router
routes []Route
getSubRelayFromEvent func(*nostr.Event) *Relay // used for handling EVENTs
getSubRelayFromFilter func(nostr.Filter) *Relay // used for handling REQs
// setting up handlers here will enable these methods
ManagementAPI RelayManagementAPI
@@ -105,8 +108,8 @@ type Relay struct {
// keep a connection reference to all connected clients for Server.Shutdown
// also used for keeping track of who is listening to what
clients map[*WebSocket][]listenerSpec
listeners []listener
clientsMutex sync.Mutex
dispatcher dispatcher
clientsMutex *channelmutex.Mutex
// set this to true to support negentropy
Negentropy bool
@@ -148,19 +151,28 @@ func (rl *Relay) UseEventstore(store eventstore.Store, maxQueryLimit int) {
return store.SaveEvent(event)
}
rl.ReplaceEvent = func(ctx context.Context, event nostr.Event) error {
return store.ReplaceEvent(event)
_, err := store.ReplaceEvent(event)
return err
}
rl.DeleteEvent = func(ctx context.Context, id nostr.ID) error {
return store.DeleteEvent(id)
}
// only when using the eventstore we automatically set up the expiration manager
rl.StartExpirationManager(rl.QueryStored, rl.DeleteEvent)
rl.StartExpirationManager(func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event] {
return rl.QueryStored(ctx, filter)
}, func(ctx context.Context, id nostr.ID) error {
return rl.DeleteEvent(ctx, id)
}, func(ctx context.Context, evt nostr.Event) {
if rl.OnEventDeleted != nil {
rl.OnEventDeleted(ctx, evt)
}
})
}
func (rl *Relay) getBaseURL(r *http.Request) string {
if rl.ServiceURL != "" {
return rl.ServiceURL
if serviceURL := rl.getServiceURL(r); serviceURL != "" {
return serviceURL
}
host := r.Header.Get("X-Forwarded-Host")
@@ -184,3 +196,121 @@ func (rl *Relay) getBaseURL(r *http.Request) string {
return proto + "://" + host + r.URL.Path
}
func (rl *Relay) getServiceURL(r *http.Request) string {
if serviceURL, ok := r.Context().Value(serviceURLOverrideKey).(string); ok {
return serviceURL
}
return rl.ServiceURL
}
// Stats returns the current number of connected clients and open listeners.
func (rl *Relay) Stats() (clients, listeners int) {
rl.clientsMutex.Lock()
defer rl.clientsMutex.Unlock()
for _, specs := range rl.clients {
listeners += len(specs)
}
return len(rl.clients), listeners
}
type ClientInfo struct {
ID string
IP string
UserAgent string
Origin string
Authenticated []nostr.PubKey
SubscriptionCount int
}
type SubscriptionInfo struct {
ID string
Filter nostr.Filter
}
type ClientSnapshot struct {
ClientInfo
Subscriptions []SubscriptionInfo
}
func (rl *Relay) ListClients() []ClientInfo {
rl.clientsMutex.Lock()
defer rl.clientsMutex.Unlock()
clients := make([]ClientInfo, 0, len(rl.clients))
for ws, specs := range rl.clients {
clients = append(clients, ClientInfo{
ID: ws.GetID(),
IP: GetIPFromRequest(ws.Request),
UserAgent: ws.Request.UserAgent(),
Origin: ws.Request.Header.Get("Origin"),
Authenticated: ws.AuthedPublicKeys,
SubscriptionCount: len(specs),
})
}
return clients
}
func (rl *Relay) GetClientSnapshot(id string) (ClientSnapshot, bool) {
rl.clientsMutex.Lock()
defer rl.clientsMutex.Unlock()
ptrn, err := base64.RawURLEncoding.DecodeString(id)
if err != nil {
return ClientSnapshot{}, false
}
ptr := binary.LittleEndian.Uint64(ptrn)
// DANGEROUS:
// don't try to do anything with this `ws` object before we confirm it exists by checking the rl.clients map
ws := (*WebSocket)(unsafe.Pointer(uintptr(ptr)))
specs, ok := rl.clients[ws]
if !ok {
return ClientSnapshot{}, false
}
details := ClientSnapshot{
ClientInfo: ClientInfo{
ID: id,
IP: GetIPFromRequest(ws.Request),
UserAgent: ws.Request.UserAgent(),
Origin: ws.Request.Header.Get("Origin"),
Authenticated: ws.AuthedPublicKeys,
SubscriptionCount: len(specs),
},
Subscriptions: make([]SubscriptionInfo, 0, len(specs)),
}
for _, spec := range specs {
filter := nostr.Filter{}
if sub, ok := rl.dispatcher.subscriptions.Load(spec.ssid); ok {
filter = sub.filter
}
details.Subscriptions = append(details.Subscriptions, SubscriptionInfo{
ID: spec.sid,
Filter: filter,
})
}
return details, true
}
func (rl *Relay) Router() *http.ServeMux {
return rl.serveMux
}
func (rl *Relay) SetRouter(mux *http.ServeMux) {
rl.serveMux = mux
}
func (rl *Relay) WithServiceURL(serviceURL string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), serviceURLOverrideKey, serviceURL)
rl.ServeHTTP(w, r.WithContext(ctx))
})
}
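WithServiceURL makes it possible to serve one relay on several public URLs, each resolving relative NIP-11 icon/banner paths against its own base (the test further down exercises exactly this). A sketch with hypothetical hostnames:

rl := khatru.NewRelay()
rl.Info.Icon = "icon.png" // relative, resolved per service URL

go http.ListenAndServe(":3334", rl.WithServiceURL("https://a.example/relay"))
http.ListenAndServe(":3335", rl.WithServiceURL("https://b.example/relay"))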
+5 -6
@@ -3,7 +3,6 @@ package khatru
import (
"context"
"math"
"math/rand/v2"
"net/http/httptest"
"testing"
"time"
@@ -14,13 +13,15 @@ import (
)
func FuzzReplaceableEvents(f *testing.F) {
f.Add(uint(1), uint(2))
f.Add(1, 1, uint(2))
f.Fuzz(func(t *testing.T, seed uint, nevents uint) {
f.Fuzz(func(t *testing.T, seed int, advance int, nevents uint) {
if nevents == 0 {
return
}
state := fuzzState{value: seed, advance: advance}
relay := NewRelay()
store := &lmdb.LMDBBackend{Path: "/tmp/fuzz"}
store.Init()
@@ -67,12 +68,10 @@ func FuzzReplaceableEvents(f *testing.F) {
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
defer cancel()
rnd := rand.New(rand.NewPCG(uint64(seed), 0))
newest := nostr.Timestamp(0)
for range nevents {
evt := createEvent(sk1, 0, `{"name":"blblbl"}`, nil)
evt.CreatedAt = nostr.Timestamp(rnd.Int64() % math.MaxUint32)
evt.CreatedAt = nostr.Timestamp(state.next(math.MaxUint32))
evt.Sign(sk1)
err = client1.Publish(ctx, evt)
if err != nil {
+89 -77
@@ -2,6 +2,9 @@ package khatru
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"strconv"
"testing"
@@ -9,8 +12,60 @@ import (
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/slicestore"
"fiatjaf.com/nostr/nip11"
"github.com/stretchr/testify/require"
)
func TestWithServiceURL(t *testing.T) {
relay := NewRelay()
relay.Info.Icon = "icon.png"
relay.Info.Banner = "banner.png"
relay.Router().HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusAccepted)
io.WriteString(w, "fallback")
})
handlerA := relay.WithServiceURL("https://a.example/relay")
handlerB := relay.WithServiceURL("https://b.example/relay")
t.Run("uses override for nip11 base url", func(t *testing.T) {
for _, tc := range []struct {
name string
handler http.Handler
expectedBase string
}{
{name: "first interface", handler: handlerA, expectedBase: "https://a.example/relay"},
{name: "second interface", handler: handlerB, expectedBase: "https://b.example/relay"},
} {
t.Run(tc.name, func(t *testing.T) {
req := httptest.NewRequest(http.MethodGet, "http://internal/relay", nil)
req.Header.Set("Accept", "application/nostr+json")
rr := httptest.NewRecorder()
tc.handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Code)
var info nip11.RelayInformationDocument
require.NoError(t, json.NewDecoder(rr.Body).Decode(&info))
require.Equal(t, tc.expectedBase+"/icon.png", info.Icon)
require.Equal(t, tc.expectedBase+"/banner.png", info.Banner)
})
}
})
t.Run("uses override for relay path matching", func(t *testing.T) {
req := httptest.NewRequest(http.MethodGet, "http://internal/not-relay", nil)
req.Header.Set("Accept", "application/nostr+json")
rr := httptest.NewRecorder()
handlerA.ServeHTTP(rr, req)
require.Equal(t, http.StatusAccepted, rr.Code)
require.Equal(t, "fallback", rr.Body.String())
})
}
func TestBasicRelayFunctionality(t *testing.T) {
// setup relay with in-memory store
relay := NewRelay()
@@ -46,15 +101,11 @@ func TestBasicRelayFunctionality(t *testing.T) {
// connect two test clients
url := "ws" + server.URL[4:]
client1, err := nostr.RelayConnect(t.Context(), url, nostr.RelayOptions{})
if err != nil {
t.Fatalf("failed to connect client1: %v", err)
}
require.NoError(t, err, "failed to connect client1")
defer client1.Close()
client2, err := nostr.RelayConnect(t.Context(), url, nostr.RelayOptions{})
if err != nil {
t.Fatalf("failed to connect client2: %v", err)
}
require.NoError(t, err, "failed to connect client2")
defer client2.Close()
// test 1: store and query events
@@ -64,18 +115,14 @@ func TestBasicRelayFunctionality(t *testing.T) {
evt1 := createEvent(sk1, 1, "hello world", nil)
err := client1.Publish(ctx, evt1)
if err != nil {
t.Fatalf("failed to publish event: %v", err)
}
require.NoError(t, err, "failed to publish event")
// Query the event back
sub, err := client2.Subscribe(ctx, nostr.Filter{
Authors: []nostr.PubKey{pk1},
Kinds: []nostr.Kind{1},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe: %v", err)
}
require.NoError(t, err, "failed to subscribe")
defer sub.Unsub()
// Wait for event
@@ -85,7 +132,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
t.Errorf("got wrong event: %v", env.ID)
}
case <-ctx.Done():
t.Fatal("timeout waiting for event")
require.FailNow(t, "timeout waiting for event")
}
})
@@ -99,17 +146,13 @@ func TestBasicRelayFunctionality(t *testing.T) {
Authors: []nostr.PubKey{pk2},
Kinds: []nostr.Kind{1},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe: %v", err)
}
require.NoError(t, err, "failed to subscribe")
defer sub.Unsub()
// Publish event from client2
evt2 := createEvent(sk2, 1, "testing live events", nil)
err = client2.Publish(ctx, evt2)
if err != nil {
t.Fatalf("failed to publish event: %v", err)
}
require.NoError(t, err, "failed to publish event")
// Wait for event on subscription
select {
@@ -118,7 +161,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
t.Errorf("got wrong event: %v", env.ID)
}
case <-ctx.Done():
t.Fatal("timeout waiting for live event")
require.FailNow(t, "timeout waiting for live event")
}
})
@@ -130,24 +173,18 @@ func TestBasicRelayFunctionality(t *testing.T) {
// Create an event to be deleted
evt3 := createEvent(sk1, 1, "delete me", nil)
err = client1.Publish(ctx, evt3)
if err != nil {
t.Fatalf("failed to publish event: %v", err)
}
require.NoError(t, err, "failed to publish event")
// Create deletion event
delEvent := createEvent(sk1, 5, "deleting", nostr.Tags{{"e", evt3.ID.Hex()}})
err = client1.Publish(ctx, delEvent)
if err != nil {
t.Fatalf("failed to publish deletion event: %v", err)
}
require.NoError(t, err, "failed to publish deletion event")
// Try to query the deleted event
sub, err := client2.Subscribe(ctx, nostr.Filter{
IDs: []nostr.ID{evt3.ID},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe: %v", err)
}
require.NoError(t, err, "failed to subscribe")
defer sub.Unsub()
// Should get EOSE without receiving the deleted event
@@ -162,7 +199,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
}
goto checkDeleteStored
case <-ctx.Done():
t.Fatal("timeout waiting for EOSE")
require.FailNow(t, "timeout waiting for EOSE")
}
}
@@ -171,9 +208,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
subDelete, err := client2.Subscribe(ctx, nostr.Filter{
IDs: []nostr.ID{delEvent.ID},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe to delete event: %v", err)
}
require.NoError(t, err, "failed to subscribe to delete event")
defer subDelete.Unsub()
gotDeleteEvent := false
@@ -189,7 +224,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
}
return
case <-ctx.Done():
t.Fatal("timeout waiting for EOSE on delete event")
require.FailNow(t, "timeout waiting for EOSE on delete event")
}
}
})
@@ -204,36 +239,28 @@ func TestBasicRelayFunctionality(t *testing.T) {
evt1.CreatedAt = 1000 // Set specific timestamp for testing
evt1.Sign(sk1)
err = client1.Publish(ctx, evt1)
if err != nil {
t.Fatalf("failed to publish initial event: %v", err)
}
require.NoError(t, err, "failed to publish initial event")
// create newer event that should replace the first
evt2 := createEvent(sk1, 0, `{"name":"newer"}`, nil)
evt2.CreatedAt = 2004 // Newer timestamp
evt2.Sign(sk1)
err = client1.Publish(ctx, evt2)
if err != nil {
t.Fatalf("failed to publish newer event: %v", err)
}
require.NoError(t, err, "failed to publish newer event")
// create older event that should not replace the current one
evt3 := createEvent(sk1, 0, `{"name":"older"}`, nil)
evt3.CreatedAt = 1500 // Older than evt2
evt3.Sign(sk1)
err = client1.Publish(ctx, evt3)
if err != nil {
t.Fatalf("failed to publish older event: %v", err)
}
require.NoError(t, err, "failed to publish older event")
// query to verify only the newest event exists
sub, err := client2.Subscribe(ctx, nostr.Filter{
Authors: []nostr.PubKey{pk1},
Kinds: []nostr.Kind{0},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe: %v", err)
}
require.NoError(t, err, "failed to subscribe")
defer sub.Unsub()
// should only get one event back (the newest one)
@@ -251,7 +278,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
}
return
case <-ctx.Done():
t.Fatal("timeout waiting for events")
require.FailNow(t, "timeout waiting for events")
}
}
})
@@ -281,26 +308,20 @@ func TestBasicRelayFunctionality(t *testing.T) {
// connect test client
url := "ws" + server.URL[4:]
client, err := nostr.RelayConnect(t.Context(), url, nostr.RelayOptions{})
if err != nil {
t.Fatalf("failed to connect client: %v", err)
}
require.NoError(t, err, "failed to connect client")
defer client.Close()
// create event that expires in 2 seconds
expiration := strconv.FormatInt(int64(nostr.Now()+2), 10)
evt := createEvent(sk1, 1, "i will expire soon", nostr.Tags{{"expiration", expiration}})
err = client.Publish(ctx, evt)
if err != nil {
t.Fatalf("failed to publish event: %v", err)
}
require.NoError(t, err, "failed to publish event")
// verify event exists initially
sub, err := client.Subscribe(ctx, nostr.Filter{
IDs: []nostr.ID{evt.ID},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe: %v", err)
}
require.NoError(t, err, "failed to subscribe")
// should get the event
select {
@@ -309,7 +330,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
t.Error("got wrong event")
}
case <-ctx.Done():
t.Fatal("timeout waiting for event")
require.FailNow(t, "timeout waiting for event")
}
sub.Unsub()
@@ -320,9 +341,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
sub, err = client.Subscribe(ctx, nostr.Filter{
IDs: []nostr.ID{evt.ID},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe: %v", err)
}
require.NoError(t, err, "failed to subscribe")
defer sub.Unsub()
// should get EOSE without receiving the expired event
@@ -337,7 +356,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
}
return
case <-ctx.Done():
t.Fatal("timeout waiting for EOSE")
require.FailNow(t, "timeout waiting for EOSE")
}
}
})
@@ -350,33 +369,26 @@ func TestBasicRelayFunctionality(t *testing.T) {
// create an event from client1
evt4 := createEvent(sk1, 1, "try to delete me", nil)
err = client1.Publish(ctx, evt4)
if err != nil {
t.Fatalf("failed to publish event: %v", err)
}
require.NoError(t, err)
// Try to delete it with client2
// try to delete it with client2
delEvent := createEvent(sk2, 5, "trying to delete", nostr.Tags{{"e", evt4.ID.Hex()}})
err = client2.Publish(ctx, delEvent)
if err == nil {
t.Fatalf("should have failed to publish deletion event: %v", err)
}
require.Error(t, err)
// Verify event still exists
// verify event still exists
sub, err := client1.Subscribe(ctx, nostr.Filter{
IDs: []nostr.ID{evt4.ID},
}, nostr.SubscriptionOptions{})
if err != nil {
t.Fatalf("failed to subscribe: %v", err)
}
require.NoError(t, err)
defer sub.Unsub()
select {
case env := <-sub.Events:
if env.ID != evt4.ID {
t.Error("got wrong event")
}
case env, more := <-sub.Events:
require.True(t, more, "should get an event, got nothing")
require.Equal(t, env.ID, evt4.ID, "got wrong event")
case <-ctx.Done():
t.Fatal("event should still exist")
require.FailNow(t, "event should still exist")
}
})
}
+3 -1
@@ -30,7 +30,9 @@ func (rl *Relay) handleRequest(ctx context.Context, id string, eose *sync.WaitGr
// run the function to query events
if nil != rl.QueryStored {
for event := range rl.QueryStored(ctx, filter) {
ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &id, Event: event})
if nil != ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &id, Event: event}) {
break
}
}
}
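The loop above now aborts the stored-event stream on the first failed write instead of draining the whole iterator into a dead connection; WriteJSON also tolerates a nil receiver (shown later in this page), so the guard doubles as a nil-connection check. The guard pattern in isolation, with makeEnvelope as a hypothetical helper:

for event := range rl.QueryStored(ctx, filter) {
	if err := ws.WriteJSON(makeEnvelope(event)); err != nil {
		break // client is gone; stop pulling from the store
	}
}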
-77
@@ -1,77 +0,0 @@
package khatru
import (
"fiatjaf.com/nostr"
)
type Router struct{ *Relay }
type Route struct {
eventMatcher func(*nostr.Event) bool
filterMatcher func(nostr.Filter) bool
relay *Relay
}
type routeBuilder struct {
router *Router
eventMatcher func(*nostr.Event) bool
filterMatcher func(nostr.Filter) bool
}
func NewRouter() *Router {
rr := &Router{Relay: NewRelay()}
rr.routes = make([]Route, 0, 3)
rr.getSubRelayFromFilter = func(f nostr.Filter) *Relay {
for _, route := range rr.routes {
if route.filterMatcher == nil || route.filterMatcher(f) {
return route.relay
}
}
return rr.Relay
}
rr.getSubRelayFromEvent = func(e *nostr.Event) *Relay {
for _, route := range rr.routes {
if route.eventMatcher == nil || route.eventMatcher(e) {
return route.relay
}
}
return rr.Relay
}
return rr
}
func (rr *Router) Route() routeBuilder {
return routeBuilder{
router: rr,
filterMatcher: func(f nostr.Filter) bool { return false },
eventMatcher: func(e *nostr.Event) bool { return false },
}
}
func (rb routeBuilder) Req(fn func(nostr.Filter) bool) routeBuilder {
rb.filterMatcher = fn
return rb
}
func (rb routeBuilder) AnyReq() routeBuilder {
rb.filterMatcher = nil
return rb
}
func (rb routeBuilder) Event(fn func(*nostr.Event) bool) routeBuilder {
rb.eventMatcher = fn
return rb
}
func (rb routeBuilder) AnyEvent() routeBuilder {
rb.eventMatcher = nil
return rb
}
func (rb routeBuilder) Relay(relay *Relay) {
rb.router.routes = append(rb.router.routes, Route{
filterMatcher: rb.filterMatcher,
eventMatcher: rb.eventMatcher,
relay: relay,
})
}
@@ -0,0 +1,5 @@
go test fuzz v1
int(-180)
int(92)
byte('{')
byte('\n')
@@ -0,0 +1,5 @@
go test fuzz v1
int(140)
int(-52)
byte('"')
byte('h')
@@ -1,3 +1,4 @@
go test fuzz v1
int(25)
int(1)
uint(223)
+8
View File
@@ -11,6 +11,7 @@ const (
subscriptionIdKey
nip86HeaderAuthKey
internalCallKey
serviceURLOverrideKey
)
func RequestAuth(ctx context.Context) {
@@ -73,6 +74,13 @@ func IsAuthed(ctx context.Context, pubkey nostr.PubKey) bool {
return false
}
// ForceSetAuthed modifies the context to insert a custom authed public key.
// It can be used in testing or other rare scenarios to make requests behave as if a given
// public key had authenticated, even though it never performed any of the authentication rituals.
func ForceSetAuthed(ctx context.Context, pubkey nostr.PubKey) context.Context {
return context.WithValue(ctx, nip86HeaderAuthKey, pubkey)
}
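// A minimal usage sketch (assuming subsequent khatru auth checks read the same
// context key this sets):
//
//	ctx = ForceSetAuthed(ctx, pubkey)
//	// handlers now see pubkey as authenticated, e.g. via IsAuthed(ctx, pubkey)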
// IsInternalCall returns true when a call to QueryEvents, for example, is being made because of a deletion
// or expiration request.
func IsInternalCall(ctx context.Context) bool {
+14
View File
@@ -2,8 +2,12 @@ package khatru
import (
"context"
"encoding/base64"
"encoding/binary"
"fmt"
"net/http"
"sync"
"unsafe"
"fiatjaf.com/nostr"
"github.com/fasthttp/websocket"
@@ -30,7 +34,17 @@ type WebSocket struct {
negentropySessions *xsync.MapOf[string, *NegentropySession]
}
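// GetID derives a compact identifier for this connection from the WebSocket
// struct's memory address, encoded as unpadded base64url: unique for the
// lifetime of the connection, but not meaningful across process restarts.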
func (ws *WebSocket) GetID() string {
ptr := uintptr(unsafe.Pointer(ws))
var id [8]byte
binary.LittleEndian.PutUint64(id[:], uint64(ptr))
return base64.RawURLEncoding.EncodeToString(id[:])
}
func (ws *WebSocket) WriteJSON(any any) error {
if ws == nil {
return fmt.Errorf("connection doesn't exist")
}
ws.mutex.Lock()
err := ws.conn.WriteJSON(any)
ws.mutex.Unlock()
+141 -132
View File
@@ -246,6 +246,8 @@ func (kind Kind) Name() string {
return "SimpleGroupMembers"
case KindSimpleGroupRoles:
return "SimpleGroupRoles"
case KindSimpleGroupLiveKitParticipants:
return "SimpleGroupLiveKitParticipants"
case KindWikiArticle:
return "WikiArticle"
case KindRedirects:
@@ -272,143 +274,150 @@ func (kind Kind) Name() string {
return "VideoViewEvent"
case KindCommunityDefinition:
return "CommunityDefinition"
case KindNsiteRoot:
return "NsiteRoot"
case KindNsiteNamed:
return "NsiteNamed"
}
return "unknown"
}
const (
KindProfileMetadata Kind = 0
KindTextNote Kind = 1
KindRecommendServer Kind = 2
KindFollowList Kind = 3
KindEncryptedDirectMessage Kind = 4
KindDeletion Kind = 5
KindRepost Kind = 6
KindReaction Kind = 7
KindBadgeAward Kind = 8
KindSimpleGroupChatMessage Kind = 9
KindSimpleGroupThreadedReply Kind = 10
KindSimpleGroupThread Kind = 11
KindSimpleGroupReply Kind = 12
KindSeal Kind = 13
KindDirectMessage Kind = 14
KindGenericRepost Kind = 16
KindReactionToWebsite Kind = 17
KindChannelCreation Kind = 40
KindChannelMetadata Kind = 41
KindChannelMessage Kind = 42
KindChannelHideMessage Kind = 43
KindChannelMuteUser Kind = 44
KindChess Kind = 64
KindMergeRequests Kind = 818
KindComment Kind = 1111
KindBid Kind = 1021
KindBidConfirmation Kind = 1022
KindOpenTimestamps Kind = 1040
KindGiftWrap Kind = 1059
KindFileMetadata Kind = 1063
KindLiveChatMessage Kind = 1311
KindPatch Kind = 1617
KindIssue Kind = 1621
KindReply Kind = 1622
KindStatusOpen Kind = 1630
KindStatusApplied Kind = 1631
KindStatusClosed Kind = 1632
KindStatusDraft Kind = 1633
KindProblemTracker Kind = 1971
KindReporting Kind = 1984
KindLabel Kind = 1985
KindRelayReviews Kind = 1986
KindAIEmbeddings Kind = 1987
KindTorrent Kind = 2003
KindTorrentComment Kind = 2004
KindCoinjoinPool Kind = 2022
KindCommunityPostApproval Kind = 4550
KindJobFeedback Kind = 7000
KindSimpleGroupPutUser Kind = 9000
KindSimpleGroupRemoveUser Kind = 9001
KindSimpleGroupEditMetadata Kind = 9002
KindSimpleGroupDeleteEvent Kind = 9005
KindSimpleGroupCreateGroup Kind = 9007
KindSimpleGroupDeleteGroup Kind = 9008
KindSimpleGroupCreateInvite Kind = 9009
KindSimpleGroupJoinRequest Kind = 9021
KindSimpleGroupLeaveRequest Kind = 9022
KindZapGoal Kind = 9041
KindNutZap Kind = 9321
KindTidalLogin Kind = 9467
KindZapRequest Kind = 9734
KindZap Kind = 9735
KindHighlights Kind = 9802
KindMuteList Kind = 10000
KindPinList Kind = 10001
KindRelayListMetadata Kind = 10002
KindBookmarkList Kind = 10003
KindCommunityList Kind = 10004
KindPublicChatList Kind = 10005
KindBlockedRelayList Kind = 10006
KindSearchRelayList Kind = 10007
KindSimpleGroupList Kind = 10009
KindInterestList Kind = 10015
KindNutZapInfo Kind = 10019
KindEmojiList Kind = 10030
KindDMRelayList Kind = 10050
KindUserServerList Kind = 10063
KindFileStorageServerList Kind = 10096
KindGoodWikiAuthorList Kind = 10101
KindGoodWikiRelayList Kind = 10102
KindNWCWalletInfo Kind = 13194
KindNsiteRoot Kind = 15128
KindLightningPubRPC Kind = 21000
KindClientAuthentication Kind = 22242
KindNWCWalletRequest Kind = 23194
KindNWCWalletResponse Kind = 23195
KindNostrConnect Kind = 24133
KindBlobs Kind = 24242
KindHTTPAuth Kind = 27235
KindCategorizedPeopleList Kind = 30000
KindCategorizedBookmarksList Kind = 30001
KindRelaySets Kind = 30002
KindBookmarkSets Kind = 30003
KindCuratedSets Kind = 30004
KindCuratedVideoSets Kind = 30005
KindMuteSets Kind = 30007
KindProfileBadges Kind = 30008
KindBadgeDefinition Kind = 30009
KindInterestSets Kind = 30015
KindStallDefinition Kind = 30017
KindProductDefinition Kind = 30018
KindMarketplaceUI Kind = 30019
KindProductSoldAsAuction Kind = 30020
KindArticle Kind = 30023
KindDraftArticle Kind = 30024
KindEmojiSets Kind = 30030
KindModularArticleHeader Kind = 30040
KindModularArticleContent Kind = 30041
KindReleaseArtifactSets Kind = 30063
KindApplicationSpecificData Kind = 30078
KindLiveEvent Kind = 30311
KindUserStatuses Kind = 30315
KindClassifiedListing Kind = 30402
KindDraftClassifiedListing Kind = 30403
KindRepositoryAnnouncement Kind = 30617
KindRepositoryState Kind = 30618
KindNsiteNamed Kind = 35128
KindSimpleGroupMetadata Kind = 39000
KindSimpleGroupAdmins Kind = 39001
KindSimpleGroupMembers Kind = 39002
KindSimpleGroupRoles Kind = 39003
KindSimpleGroupLiveKitParticipants Kind = 39004
KindWikiArticle Kind = 30818
KindRedirects Kind = 30819
KindFeed Kind = 31890
KindDateCalendarEvent Kind = 31922
KindTimeCalendarEvent Kind = 31923
KindCalendar Kind = 31924
KindCalendarEventRSVP Kind = 31925
KindHandlerRecommendation Kind = 31989
KindHandlerInformation Kind = 31990
KindVideoEvent Kind = 34235
KindShortVideoEvent Kind = 34236
KindVideoViewEvent Kind = 34237
KindCommunityDefinition Kind = 34550
)
func (kind Kind) IsRegular() bool {
+18 -18
View File
@@ -9,30 +9,30 @@ import (
func TestAddSupportedNIP(t *testing.T) {
info := RelayInformationDocument{}
info.AddSupportedNIP("12")
info.AddSupportedNIP("12")
info.AddSupportedNIP("13")
info.AddSupportedNIP("1")
info.AddSupportedNIP("12")
info.AddSupportedNIP("44")
info.AddSupportedNIP("2")
info.AddSupportedNIP("13")
info.AddSupportedNIP("2")
info.AddSupportedNIP("13")
info.AddSupportedNIP("0")
info.AddSupportedNIP("17")
info.AddSupportedNIP("19")
info.AddSupportedNIP("1")
info.AddSupportedNIP("18")
assert.Contains(t, info.SupportedNIPs, "0", "1", "2", "12", "13", "17", "18", "19", "44")
}
func TestAddSupportedNIPs(t *testing.T) {
info := RelayInformationDocument{}
info.AddSupportedNIPs([]string{"0", "1", "2", "12", "13", "17", "18", "19", "44"})
assert.Contains(t, info.SupportedNIPs, "0", "1", "2", "12", "13", "17", "18", "19", "44")
}
func TestFetch(t *testing.T) {
+5 -5
View File
@@ -14,7 +14,7 @@ type RelayInformationDocument struct {
PubKey *nostr.PubKey `json:"pubkey,omitempty"`
Self *nostr.PubKey `json:"self,omitempty"`
Contact string `json:"contact,omitempty"`
SupportedNIPs []string `json:"supported_nips,omitempty"`
Software string `json:"software,omitempty"`
Version string `json:"version,omitempty"`
@@ -33,16 +33,16 @@ type RelayInformationDocument struct {
SupportedGrasps []string `json:"supported_grasps,omitempty"`
}
func (info *RelayInformationDocument) AddSupportedNIP(nip string) {
idx := slices.IndexFunc(info.SupportedNIPs, func(n string) bool { return n == nip })
if idx != -1 {
return
}
info.SupportedNIPs = append(info.SupportedNIPs, nip)
}
func (info *RelayInformationDocument) AddSupportedNIPs(numbers []string) {
for _, n := range numbers {
info.AddSupportedNIP(n)
}
+126 -35
View File
@@ -4,6 +4,7 @@ import (
"fmt"
"net/url"
"slices"
"strconv"
"strings"
"fiatjaf.com/nostr"
@@ -38,10 +39,11 @@ func ParseGroupAddress(raw string) (GroupAddress, error) {
type Group struct {
Address GroupAddress
Name string
Picture string
About string
Members map[nostr.PubKey][]*Role
LiveKitParticipants []nostr.PubKey
// indicates that only members can read group messages
Private bool
@@ -55,13 +57,20 @@ type Group struct {
// indicates that relays should hide group metadata from non-members
Hidden bool
// indicates that the group supports audio/video live chat
LiveKit bool
// indicates which event kinds this group supports
SupportedKinds []nostr.Kind
Roles []*Role
InviteCodes []string
LastMetadataUpdate nostr.Timestamp
LastAdminsUpdate nostr.Timestamp
LastMembersUpdate nostr.Timestamp
LastRolesUpdate nostr.Timestamp
LastLiveKitParticipantsUpdate nostr.Timestamp
}
func (group Group) String() string {
@@ -83,6 +92,11 @@ func (group Group) String() string {
maybeClosed = " closed"
}
maybeLiveKit := ""
if group.LiveKit {
maybeLiveKit = " livekit"
}
members := make([]string, len(group.Members))
i := 0
for pubkey, roles := range group.Members {
@@ -101,13 +115,14 @@ func (group Group) String() string {
i++
}
return fmt.Sprintf(`<Group %s name="%s"%s%s%s%s%s picture="%s" about="%s" members=[%v]>`,
group.Address,
group.Name,
maybePrivate,
maybeRestricted,
maybeHidden,
maybeClosed,
maybeLiveKit,
group.Picture,
group.About,
strings.Join(members, " "),
@@ -122,9 +137,10 @@ func NewGroup(gadstr string) (Group, error) {
}
return Group{
Address: gad,
Name: gad.ID,
Members: make(map[nostr.PubKey][]*Role),
LiveKitParticipants: make([]nostr.PubKey, 0),
}, nil
}
@@ -134,8 +150,9 @@ func NewGroupFromMetadataEvent(relayURL string, evt *nostr.Event) (Group, error)
Relay: relayURL,
ID: evt.Tags.GetD(),
},
Name: evt.Tags.GetD(),
Members: make(map[nostr.PubKey][]*Role),
LiveKitParticipants: make([]nostr.PubKey, 0),
}
err := g.MergeInMetadataEvent(evt)
@@ -173,6 +190,18 @@ func (group Group) ToMetadataEvent() nostr.Event {
if group.Closed {
evt.Tags = append(evt.Tags, nostr.Tag{"closed"})
}
if group.LiveKit {
evt.Tags = append(evt.Tags, nostr.Tag{"livekit"})
}
if group.SupportedKinds != nil {
tag := make(nostr.Tag, 1, 1+len(group.SupportedKinds))
tag[0] = "supported_kinds"
for _, kind := range group.SupportedKinds {
tag = append(tag, strconv.Itoa(int(kind)))
}
evt.Tags = append(evt.Tags, tag)
}
return evt
}
@@ -236,6 +265,22 @@ func (group Group) ToRolesEvent() nostr.Event {
return evt
}
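// ToLiveKitParticipantsEvent encodes the current participant list as a
// kind-39004 event: a "d" tag carrying the group id followed by one
// "participant" tag per pubkey.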
func (group Group) ToLiveKitParticipantsEvent() nostr.Event {
evt := nostr.Event{
Kind: nostr.KindSimpleGroupLiveKitParticipants,
CreatedAt: group.LastLiveKitParticipantsUpdate,
Tags: make(nostr.Tags, 1, 1+len(group.LiveKitParticipants)),
}
evt.Tags[0] = nostr.Tag{"d", group.Address.ID}
for _, member := range group.LiveKitParticipants {
tag := nostr.Tag{"participant", member.Hex()}
evt.Tags = append(evt.Tags, tag)
}
return evt
}
func (group *Group) MergeInMetadataEvent(evt *nostr.Event) error {
if evt.Kind != nostr.KindSimpleGroupMetadata {
return fmt.Errorf("expected kind %d, got %d", nostr.KindSimpleGroupMetadata, evt.Kind)
@@ -247,27 +292,42 @@ func (group *Group) MergeInMetadataEvent(evt *nostr.Event) error {
group.LastMetadataUpdate = evt.CreatedAt
group.Name = group.Address.ID
if tag := evt.Tags.Find("name"); tag != nil {
group.Name = tag[1]
}
if tag := evt.Tags.Find("about"); tag != nil {
group.About = tag[1]
}
if tag := evt.Tags.Find("picture"); tag != nil {
group.Picture = tag[1]
}
if tag := evt.Tags.Find("private"); tag != nil {
group.Private = true
}
if tag := evt.Tags.Find("restricted"); tag != nil {
group.Restricted = true
}
if tag := evt.Tags.Find("hidden"); tag != nil {
group.Hidden = true
}
if tag := evt.Tags.Find("closed"); tag != nil {
group.Closed = true
for _, tag := range evt.Tags {
if len(tag) >= 1 {
switch tag[0] {
case "private":
group.Private = true
case "restricted":
group.Restricted = true
case "closed":
group.Closed = true
case "hidden":
group.Hidden = true
case "livekit":
group.LiveKit = true
case "supported_kinds":
kinds := make([]nostr.Kind, 0, len(tag)-1)
for _, raw := range tag[1:] {
kind, err := strconv.Atoi(raw)
if err != nil {
continue
}
kinds = append(kinds, nostr.Kind(kind))
}
group.SupportedKinds = kinds
default:
if len(tag) >= 2 {
switch tag[0] {
case "name":
group.Name = tag[1]
case "about":
group.About = tag[1]
case "picture":
group.Picture = tag[1]
}
}
}
}
}
return nil
@@ -368,3 +428,34 @@ func (group *Group) MergeInRolesEvent(evt *nostr.Event) error {
return nil
}
func (group *Group) MergeInLiveKitParticipantsEvent(evt *nostr.Event) error {
if evt.Kind != nostr.KindSimpleGroupLiveKitParticipants {
return fmt.Errorf("expected kind %d, got %d", nostr.KindSimpleGroupLiveKitParticipants, evt.Kind)
}
if evt.CreatedAt < group.LastLiveKitParticipantsUpdate {
return fmt.Errorf("event is older than our last update (%d vs %d)", evt.CreatedAt, group.LastLiveKitParticipantsUpdate)
}
group.LastLiveKitParticipantsUpdate = evt.CreatedAt
group.LiveKitParticipants = make([]nostr.PubKey, 0, len(evt.Tags))
for _, tag := range evt.Tags {
if len(tag) < 2 {
continue
}
if tag[0] != "participant" {
continue
}
member, err := nostr.PubKeyFromHex(tag[1])
if err != nil {
continue
}
if slices.Contains(group.LiveKitParticipants, member) {
continue
}
group.LiveKitParticipants = append(group.LiveKitParticipants, member)
}
return nil
}
+129 -48
View File
@@ -3,6 +3,7 @@ package nip29
import (
"fmt"
"slices"
"strconv"
"fiatjaf.com/nostr"
)
@@ -78,48 +79,101 @@ var moderationActionFactories = map[nostr.Kind]func(nostr.Event) (Action, error)
nostr.KindSimpleGroupEditMetadata: func(evt nostr.Event) (Action, error) {
ok := false
edit := EditMetadata{When: evt.CreatedAt}
if t := evt.Tags.Find("name"); t != nil {
edit.NameValue = &t[1]
ok = true
}
if t := evt.Tags.Find("picture"); t != nil {
edit.PictureValue = &t[1]
ok = true
}
if t := evt.Tags.Find("about"); t != nil {
edit.AboutValue = &t[1]
ok = true
}
y := true
n := false
if evt.Tags.Has("closed") {
edit.ClosedValue = &y
ok = true
} else if evt.Tags.Has("open") {
edit.ClosedValue = &n
ok = true
}
if evt.Tags.Has("restricted") {
edit.RestrictedValue = &y
ok = true
} else if evt.Tags.Has("unrestricted") {
edit.RestrictedValue = &n
ok = true
}
if evt.Tags.Has("hidden") {
edit.HiddenValue = &y
ok = true
} else if evt.Tags.Has("visible") {
edit.HiddenValue = &n
ok = true
}
if evt.Tags.Has("private") {
edit.PrivateValue = &y
ok = true
} else if evt.Tags.Has("public") {
edit.PrivateValue = &n
ok = true
hasName := false
// DEPRECATED: eventually remove all the branches that don't set Replace = true,
// so that edit-metadata becomes a PUT rather than a PATCH
for _, tag := range evt.Tags {
if len(tag) >= 1 {
switch tag[0] {
case "name":
if len(tag) >= 2 {
edit.NameValue = &tag[1]
if ok {
edit.Replace = true
}
ok = true
hasName = true
}
case "picture":
if len(tag) >= 2 {
edit.PictureValue = &tag[1]
if hasName {
edit.Replace = true
}
ok = true
}
case "about":
if len(tag) >= 2 {
edit.AboutValue = &tag[1]
if hasName {
edit.Replace = true
}
ok = true
}
case "supported_kinds":
kinds := make([]nostr.Kind, 0, len(tag)-1)
for _, kstr := range tag[1:] {
if kind, err := strconv.ParseUint(kstr, 10, 16); err != nil {
return nil, fmt.Errorf("invalid kind: %w", err)
} else {
kinds = append(kinds, nostr.Kind(kind))
}
}
edit.SupportedKindsValue = &kinds
edit.Replace = true
case "closed":
edit.ClosedValue = &y
if hasName {
edit.Replace = true
}
ok = true
case "open":
edit.ClosedValue = &n
ok = true
case "restricted":
edit.RestrictedValue = &y
if hasName {
edit.Replace = true
}
ok = true
case "unrestricted":
edit.RestrictedValue = &n
ok = true
case "hidden":
edit.HiddenValue = &y
if hasName {
edit.Replace = true
}
ok = true
case "visible":
edit.HiddenValue = &n
ok = true
case "private":
edit.PrivateValue = &y
if hasName {
edit.Replace = true
}
ok = true
case "public":
edit.PrivateValue = &n
ok = true
case "livekit":
edit.LiveKitValue = &y
edit.Replace = true
ok = true
case "no-livekit":
edit.LiveKitValue = &n
ok = true
case "no-text":
edit.SupportedKindsValue = nil
ok = true
}
}
}
if ok {
@@ -226,19 +280,36 @@ func (a RemoveUser) Apply(group *Group) {
}
type EditMetadata struct {
NameValue *string
PictureValue *string
AboutValue *string
RestrictedValue *bool
ClosedValue *bool
HiddenValue *bool
PrivateValue *bool
LiveKitValue *bool
SupportedKindsValue *[]nostr.Kind
Replace bool
When nostr.Timestamp
}
func (_ EditMetadata) Name() string { return "edit-metadata" }
func (a EditMetadata) Apply(group *Group) {
group.LastMetadataUpdate = a.When
if a.Replace {
group.Name = ""
group.Picture = ""
group.About = ""
group.Restricted = false
group.Closed = false
group.Hidden = false
group.Private = false
group.LiveKit = false
group.SupportedKinds = nil
}
if a.NameValue != nil {
group.Name = *a.NameValue
}
@@ -260,6 +331,12 @@ func (a EditMetadata) Apply(group *Group) {
if a.PrivateValue != nil {
group.Private = *a.PrivateValue
}
if a.LiveKitValue != nil {
group.LiveKit = *a.LiveKitValue
}
if a.SupportedKindsValue != nil {
group.SupportedKinds = *a.SupportedKindsValue
}
}
type CreateGroup struct {
@@ -272,6 +349,7 @@ func (a CreateGroup) Apply(group *Group) {
group.LastMetadataUpdate = a.When
group.LastAdminsUpdate = a.When
group.LastMembersUpdate = a.When
group.LastLiveKitParticipantsUpdate = a.When
}
type DeleteGroup struct {
@@ -281,6 +359,7 @@ type DeleteGroup struct {
func (_ DeleteGroup) Name() string { return "delete-group" }
func (a DeleteGroup) Apply(group *Group) {
group.Members = make(map[nostr.PubKey][]*Role)
group.LiveKitParticipants = make([]nostr.PubKey, 0)
group.Closed = true
group.Private = true
group.Restricted = true
@@ -288,9 +367,11 @@ func (a DeleteGroup) Apply(group *Group) {
group.Name = "[deleted]"
group.About = ""
group.Picture = ""
group.LiveKit = false
group.LastMetadataUpdate = a.When
group.LastAdminsUpdate = a.When
group.LastMembersUpdate = a.When
group.LastLiveKitParticipantsUpdate = a.When
}
type CreateInvite struct {
+1
View File
@@ -28,6 +28,7 @@ var MetadataEventKinds = KindRange{
nostr.KindSimpleGroupAdmins,
nostr.KindSimpleGroupMembers,
nostr.KindSimpleGroupRoles,
nostr.KindSimpleGroupLiveKitParticipants,
}
func (kr KindRange) Includes(kind nostr.Kind) bool {
+106
View File
@@ -0,0 +1,106 @@
package gitnaturalapi
import (
"fmt"
"strconv"
"strings"
)
type Person struct {
Name string
Email string
Timestamp int64
Timezone string
}
type Commit struct {
Hash string
Tree string
Parents []string
Author Person
Committer Person
Message string
}
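// ParseCommit decodes a raw git commit object: header lines ("tree", "parent",
// "author", "committer"), a blank line, then the commit message.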
func ParseCommit(data []byte, hash string) (*Commit, error) {
content := string(data)
headerEndIndex := strings.Index(content, "\n\n")
if headerEndIndex == -1 {
return nil, fmt.Errorf("invalid commit format for %s: no message separator found", hash)
}
header := content[:headerEndIndex]
message := content[headerEndIndex+2:]
lines := strings.Split(header, "\n")
result := &Commit{
Hash: hash,
Parents: []string{},
Message: message,
}
for _, line := range lines {
if strings.HasPrefix(line, "tree ") {
result.Tree = line[5:]
} else if strings.HasPrefix(line, "parent ") {
result.Parents = append(result.Parents, line[7:])
} else if strings.HasPrefix(line, "author ") {
person, err := parsePerson(line[7:])
if err != nil {
return nil, fmt.Errorf("invalid author in commit %s: %w", hash, err)
}
result.Author = person
} else if strings.HasPrefix(line, "committer ") {
person, err := parsePerson(line[10:])
if err != nil {
return nil, fmt.Errorf("invalid committer in commit %s: %w", hash, err)
}
result.Committer = person
}
}
if result.Tree == "" {
return nil, fmt.Errorf("invalid commit format for %s: missing tree", hash)
}
if result.Author.Name == "" {
return nil, fmt.Errorf("invalid commit format for %s: missing author", hash)
}
if result.Committer.Name == "" {
return nil, fmt.Errorf("invalid commit format for %s: missing committer", hash)
}
return result, nil
}
func parsePerson(line string) (Person, error) {
emailStart := strings.Index(line, " <")
if emailStart == -1 {
return Person{}, fmt.Errorf("invalid person format: %s", line)
}
name := line[:emailStart]
emailEnd := strings.Index(line[emailStart+2:], ">")
if emailEnd == -1 {
return Person{}, fmt.Errorf("invalid person format: %s", line)
}
email := line[emailStart+2 : emailStart+2+emailEnd]
rest := strings.TrimSpace(line[emailStart+2+emailEnd+1:])
parts := strings.SplitN(rest, " ", 2)
if len(parts) != 2 {
return Person{}, fmt.Errorf("invalid person format: %s", line)
}
timestamp, err := strconv.ParseInt(parts[0], 10, 64)
if err != nil {
return Person{}, fmt.Errorf("invalid timestamp in person: %s", parts[0])
}
return Person{
Name: name,
Email: email,
Timestamp: timestamp,
Timezone: parts[1],
}, nil
}
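// For example, parsePerson("fiatjaf <fiatjaf@gmail.com> 1767157644 -0300") yields
// Person{Name: "fiatjaf", Email: "fiatjaf@gmail.com", Timestamp: 1767157644, Timezone: "-0300"}.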
+413
View File
@@ -0,0 +1,413 @@
package gitnaturalapi
import (
"strings"
"sync"
)
type DiffLine struct {
Index int
Status string
Text string
Change string
}
type DiffFile struct {
Path string
Status string
Content []byte
Lines []DiffLine
}
type changedEntry struct {
newVersion TreeEntry
oldVersions []TreeEntry
}
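// GetCommitDiff diffs a commit against each of its parents, fetching changed,
// added and deleted blobs concurrently; files containing NUL bytes are flagged
// as binary rather than line-diffed.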
func GetCommitDiff(url string, commitOrRef string) ([]DiffFile, error) {
commit, err := GetSingleCommit(url, commitOrRef)
if err != nil {
return nil, err
}
added := make(map[string]TreeEntry)
deleted := make(map[string]TreeEntry)
changed := make(map[string]*changedEntry)
unchanged := make(map[string]bool)
for _, parent := range commit.Parents {
parentCommit, err := GetSingleCommit(url, parent)
if err != nil {
return nil, err
}
err = computeTreeDiffs(url, commit.Tree, parentCommit.Tree, "", added, deleted, changed, unchanged)
if err != nil {
return nil, err
}
}
var diff []DiffFile
var mu sync.Mutex
var wg sync.WaitGroup
var firstErr error
for path, entry := range changed {
p := path
e := entry
wg.Add(1)
go func() {
defer wg.Done()
curr, err := GetObject(url, e.newVersion.Hash)
if err != nil {
mu.Lock()
if firstErr == nil {
firstErr = err
}
mu.Unlock()
return
}
if curr == nil {
return
}
if len(e.oldVersions) == 0 {
return
}
oldObj, err := GetObject(url, e.oldVersions[0].Hash)
if err != nil || oldObj == nil {
return
}
if isBinary(curr.Data) || isBinary(oldObj.Data) {
mu.Lock()
diff = append(diff, DiffFile{
Path: p,
Status: "changed-binary",
})
mu.Unlock()
return
}
lines := diffTextLines(oldObj.Data, curr.Data)
mu.Lock()
diff = append(diff, DiffFile{
Path: p,
Status: "changed",
Lines: lines,
})
mu.Unlock()
}()
}
for path, entry := range deleted {
p := path
e := entry
wg.Add(1)
go func() {
defer wg.Done()
obj, err := GetObject(url, e.Hash)
if err != nil || obj == nil {
return
}
mu.Lock()
diff = append(diff, DiffFile{
Path: p,
Status: "deleted",
Content: obj.Data,
})
mu.Unlock()
}()
}
for path, entry := range added {
p := path
e := entry
wg.Add(1)
go func() {
defer wg.Done()
obj, err := GetObject(url, e.Hash)
if err != nil || obj == nil {
return
}
mu.Lock()
diff = append(diff, DiffFile{
Path: p,
Status: "added",
Content: obj.Data,
})
mu.Unlock()
}()
}
wg.Wait()
if firstErr != nil {
return nil, firstErr
}
return diff, nil
}
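// isBinary mirrors git's usual heuristic: any NUL byte marks the blob as binary.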
func isBinary(data []byte) bool {
for _, b := range data {
if b == 0 {
return true
}
}
return false
}
func diffTextLines(oldData []byte, newData []byte) []DiffLine {
oldText := string(oldData)
newText := string(newData)
oldLines := splitLines(oldText)
newLines := splitLines(newText)
ops := lcsOperations(oldLines, newLines)
allLines := make([]DiffLine, 0, len(ops))
oldIndex := 1
newIndex := 1
for i := 0; i < len(ops); i++ {
op := ops[i]
var next *lcsOp
if i+1 < len(ops) {
next = &ops[i+1]
}
if op.typ == "del" && next != nil && next.typ == "add" {
allLines = append(allLines, DiffLine{
Status: "changed",
Index: newIndex,
Text: next.line,
})
oldIndex++
newIndex++
i++
continue
}
if op.typ == "add" && next != nil && next.typ == "del" {
allLines = append(allLines, DiffLine{
Status: "changed",
Index: newIndex,
Text: op.line,
})
oldIndex++
newIndex++
i++
continue
}
if op.typ == "add" {
allLines = append(allLines, DiffLine{
Status: "added",
Index: newIndex,
Text: op.line,
})
newIndex++
continue
}
if op.typ == "del" {
allLines = append(allLines, DiffLine{
Status: "deleted",
Index: oldIndex,
Text: op.line,
})
oldIndex++
continue
}
oldIndex++
newIndex++
}
if len(allLines) == 0 {
return allLines
}
keep := make([]bool, len(allLines))
for i := 0; i < len(allLines); i++ {
start := i - 3
if start < 0 {
start = 0
}
end := i + 3
if end >= len(allLines) {
end = len(allLines) - 1
}
for j := start; j <= end; j++ {
keep[j] = true
}
}
result := make([]DiffLine, 0, len(allLines))
for i := 0; i < len(allLines); i++ {
if keep[i] {
result = append(result, allLines[i])
}
}
return result
}
type lcsOp struct {
typ string
line string
}
func splitLines(text string) []string {
lines := strings.Split(text, "\n")
if len(lines) > 0 && lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
return lines
}
func lcsOperations(oldLines []string, newLines []string) []lcsOp {
n := len(oldLines)
m := len(newLines)
dp := make([][]uint32, n+1)
for i := range dp {
dp[i] = make([]uint32, m+1)
}
for i := 1; i <= n; i++ {
for j := 1; j <= m; j++ {
if oldLines[i-1] == newLines[j-1] {
dp[i][j] = dp[i-1][j-1] + 1
} else if dp[i-1][j] >= dp[i][j-1] {
dp[i][j] = dp[i-1][j]
} else {
dp[i][j] = dp[i][j-1]
}
}
}
ops := make([]lcsOp, 0, n+m)
i := n
j := m
for i > 0 || j > 0 {
if i > 0 && j > 0 && oldLines[i-1] == newLines[j-1] {
ops = append(ops, lcsOp{typ: "equal", line: oldLines[i-1]})
i--
j--
continue
}
if i > 0 && (j == 0 || dp[i-1][j] >= dp[i][j-1]) {
ops = append(ops, lcsOp{typ: "del", line: oldLines[i-1]})
i--
continue
}
if j > 0 {
ops = append(ops, lcsOp{typ: "add", line: newLines[j-1]})
j--
}
}
for i, j := 0, len(ops)-1; i < j; i, j = i+1, j-1 {
ops[i], ops[j] = ops[j], ops[i]
}
return ops
}
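// A small worked example: lcsOperations([]string{"a", "b"}, []string{"a", "c"})
// yields [{equal "a"} {add "c"} {del "b"}], and diffTextLines folds the adjacent
// add/del pair into a single "changed" line with text "c".

// computeTreeDiffs recursively walks a commit's tree against a parent's tree,
// recording added, deleted and changed entries keyed by slash-joined path.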
func computeTreeDiffs(
url string,
treeHash string,
parentTreeHash string,
basePath string,
added map[string]TreeEntry,
deleted map[string]TreeEntry,
changed map[string]*changedEntry,
unchanged map[string]bool,
) error {
var newTree []TreeEntry
var oldTree []TreeEntry
if treeHash != "" {
obj, err := GetObject(url, treeHash)
if err != nil {
return err
}
if obj != nil {
newTree = ParseTree(obj.Data)
}
}
if parentTreeHash != "" {
obj, err := GetObject(url, parentTreeHash)
if err != nil {
return err
}
if obj != nil {
oldTree = ParseTree(obj.Data)
}
}
for _, entry := range newTree {
var old *TreeEntry
for _, o := range oldTree {
if o.Path == entry.Path {
o := o
old = &o
break
}
}
if old != nil {
delete(added, basePath+entry.Path)
if old.Hash == entry.Hash {
unchanged[basePath+entry.Path] = true
} else {
if entry.IsDir {
err := computeTreeDiffs(url, entry.Hash, old.Hash, basePath+entry.Path+"/", added, deleted, changed, unchanged)
if err != nil {
return err
}
} else {
if existing, exists := changed[basePath+entry.Path]; !exists {
changed[basePath+entry.Path] = &changedEntry{
newVersion: entry,
oldVersions: []TreeEntry{*old},
}
} else {
existing.oldVersions = append(existing.oldVersions, *old)
}
}
}
} else {
if entry.IsDir {
err := computeTreeDiffs(url, entry.Hash, "", basePath+entry.Path+"/", added, deleted, changed, unchanged)
if err != nil {
return err
}
} else {
added[basePath+entry.Path] = entry
}
}
}
for _, old := range oldTree {
if unchanged[basePath+old.Path] || changed[basePath+old.Path] != nil {
continue
}
if old.IsDir {
err := computeTreeDiffs(url, "", old.Hash, basePath+old.Path+"/", added, deleted, changed, unchanged)
if err != nil {
return err
}
} else {
deleted[basePath+old.Path] = old
}
}
return nil
}
+264
View File
@@ -0,0 +1,264 @@
package gitnaturalapi
import (
"fmt"
"slices"
"strings"
)
type MissingCapability struct {
URL string
Capability string
}
func (e *MissingCapability) Error() string {
return fmt.Sprintf("server at %s is missing required capability %s", e.URL, e.Capability)
}
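// prepareRequest resolves a symbolic ref ("refs/...") to a commit hash when
// necessary and assembles the capability list for the want request, failing
// fast when the server lacks a capability we depend on.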
func prepareRequest(url string, commitOrRef string, needFilter bool) (resolvedRef string, capabilities []string, err error) {
var info *InfoRefsUploadPackResponse
if strings.HasPrefix(commitOrRef, "refs/") {
info, err = GetInfoRefs(url)
if err != nil {
return "", nil, err
}
resolved, ok := info.Refs[commitOrRef]
if !ok {
return "", nil, fmt.Errorf("ref %s not found", commitOrRef)
}
commitOrRef = resolved
}
caps, err := GetCapabilities(url, info)
if err != nil {
return "", nil, err
}
for _, c := range DefaultCapabilities {
if slices.Contains(caps, c) {
capabilities = append(capabilities, c)
}
}
for _, c := range NecessaryCapabilities {
if slices.Contains(caps, c) {
capabilities = append(capabilities, c)
} else {
return "", nil, &MissingCapability{URL: url, Capability: c}
}
}
for _, c := range RequiredCapabilities {
if !slices.Contains(caps, c) {
return "", nil, &MissingCapability{URL: url, Capability: c}
}
}
if needFilter {
if slices.Contains(caps, "filter") {
capabilities = append(capabilities, "filter")
} else {
return "", nil, &MissingCapability{URL: url, Capability: "filter"}
}
}
return commitOrRef, capabilities, nil
}
func GetObject(url string, blobHash string) (*ParsedObject, error) {
ref, caps, err := prepareRequest(url, blobHash, false)
if err != nil {
return nil, err
}
deepen := 1
want, err := CreateWantRequest(ref, caps, &deepen, "")
if err != nil {
return nil, err
}
result, err := FetchPackfile(url, want)
if err != nil {
return nil, err
}
return result.Objects[blobHash], nil
}
func GetDirectoryTreeAt(url string, commitOrRef string, nestLimit *int) (*Tree, error) {
ref, caps, err := prepareRequest(url, commitOrRef, true)
if err != nil {
return nil, err
}
want, err := CreateWantRequest(ref, caps, nestLimit, "blob:none")
if err != nil {
return nil, err
}
result, err := FetchPackfile(url, want)
if err != nil {
return nil, err
}
commit := result.Objects[ref]
if commit == nil {
return nil, fmt.Errorf("commit %s not found in packfile", ref)
}
treeHash := string(commit.Data[5:45]) // commit objects start with "tree <40-hex-sha1>\n"
rootTree := result.Objects[treeHash]
if rootTree == nil {
return nil, fmt.Errorf("root tree %s not found in packfile", treeHash)
}
return LoadTree(rootTree, result.Objects, nestLimit), nil
}
func ShallowCloneRepositoryAt(url string, commitOrRef string) (*Commit, *Tree, error) {
ref, caps, err := prepareRequest(url, commitOrRef, false)
if err != nil {
return nil, nil, err
}
deepen := 1
want, err := CreateWantRequest(ref, caps, &deepen, "")
if err != nil {
return nil, nil, err
}
result, err := FetchPackfile(url, want)
if err != nil {
return nil, nil, err
}
commitObj := result.Objects[ref]
if commitObj == nil {
return nil, nil, fmt.Errorf("commit %s not found in packfile", ref)
}
treeHash := string(commitObj.Data[5:45]) // commit objects start with "tree <40-hex-sha1>\n"
rootTree := result.Objects[treeHash]
if rootTree == nil {
return nil, nil, fmt.Errorf("root tree %s not found in packfile", treeHash)
}
commit, err := ParseCommit(commitObj.Data, commitObj.Hash)
if err != nil {
return nil, nil, err
}
tree := LoadTree(rootTree, result.Objects, nil)
return commit, tree, nil
}
func FetchCommitsOnly(url string, commitOrRef string, maxCommits *int) ([]*Commit, error) {
ref, caps, err := prepareRequest(url, commitOrRef, true)
if err != nil {
return nil, err
}
want, err := CreateWantRequest(ref, caps, maxCommits, "tree:0")
if err != nil {
return nil, err
}
result, err := FetchPackfile(url, want)
if err != nil {
return nil, err
}
commitMap := make(map[string]*Commit, len(result.Objects))
for hash, obj := range result.Objects {
commit, err := ParseCommit(obj.Data, hash)
if err != nil {
return nil, err
}
commitMap[hash] = commit
}
// sort topologically starting from the requested ref
sorted := make([]*Commit, 0, len(commitMap))
visited := make(map[string]bool, len(commitMap))
var visit func(hash string)
visit = func(hash string) {
if visited[hash] {
return
}
c, ok := commitMap[hash]
if !ok {
return
}
visited[hash] = true
sorted = append(sorted, c)
for _, parent := range c.Parents {
visit(parent)
}
}
visit(ref)
for _, c := range commitMap {
if !visited[c.Hash] {
sorted = append(sorted, c)
}
}
return sorted, nil
}
func GetSingleCommit(url string, commitOrRef string) (*Commit, error) {
maxCommits := 1
commits, err := FetchCommitsOnly(url, commitOrRef, &maxCommits)
if err != nil {
return nil, err
}
if len(commits) == 0 {
return nil, fmt.Errorf("no commit found for reference: %s", commitOrRef)
}
return commits[0], nil
}
func GetObjectByPath(url string, commitOrRef string, path string) (*TreeEntry, error) {
normalizedPath := strings.ReplaceAll(path, "\\", "/")
normalizedPath = strings.TrimLeft(normalizedPath, "/")
normalizedPath = strings.TrimRight(normalizedPath, "/")
var pathSegments []string
if normalizedPath != "" {
pathSegments = strings.Split(normalizedPath, "/")
}
requiredDepth := len(pathSegments)
tree, err := GetDirectoryTreeAt(url, commitOrRef, &requiredDepth)
if err != nil {
return nil, err
}
currentLevel := tree
nextSegment:
for i, segment := range pathSegments {
isLastSegment := i == len(pathSegments)-1
for _, dir := range currentLevel.Directories {
if dir.Name == segment {
if isLastSegment {
return &TreeEntry{Path: segment, Mode: "40000", IsDir: true, Hash: dir.Hash}, nil
}
if dir.Content != nil {
currentLevel = dir.Content
continue nextSegment
}
return nil, nil
}
}
if isLastSegment {
for _, file := range currentLevel.Files {
if file.Name == segment {
return &TreeEntry{Path: segment, Mode: "100644", IsDir: false, Hash: file.Hash}, nil
}
}
}
return nil, nil
}
return nil, nil
}
+289
View File
@@ -0,0 +1,289 @@
package gitnaturalapi
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestGetRefs(t *testing.T) {
info, err := GetInfoRefs("https://codeberg.org/dluvian/gitplaza.git")
require.NoError(t, err)
require.Contains(t, info.Capabilities, "shallow")
require.Contains(t, info.Capabilities, "object-format=sha1")
require.Greater(t, len(info.Refs), 5)
require.Contains(t, info.Refs, "refs/heads/master")
require.Equal(t, "a04d0761564b0d23c5edbadf494ab4f1cc4656f4", info.Refs["refs/tags/v0.1.0"])
require.Equal(t, "refs/heads/master", info.Symrefs["HEAD"])
}
func TestGetOnlyTreeAtCurrentCommit(t *testing.T) {
urls := []string{
"https://codeberg.org/dluvian/gitplaza.git",
"https://github.com/fiatjaf/pyramid.git",
"https://pyramid.fiatjaf.com/npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6/nostrlib.git",
}
for _, url := range urls {
t.Run(url, func(t *testing.T) {
tree, err := GetDirectoryTreeAt(url, "refs/heads/master", nil)
require.NoError(t, err)
for _, file := range tree.Files {
require.Nil(t, file.Content, "file %q should have nil content at %s", file.Name, url)
}
require.Greater(t, len(tree.Directories), 2, "at %s", url)
})
}
}
func TestCloneRepositoryAtCurrentCommit(t *testing.T) {
urls := []string{
"https://codeberg.org/dluvian/gitplaza.git",
"https://github.com/fiatjaf/pyramid.git",
"https://pyramid.fiatjaf.com/npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6/nostrlib.git",
}
for _, url := range urls {
t.Run(url, func(t *testing.T) {
commit, tree, err := ShallowCloneRepositoryAt(url, "refs/heads/master")
require.NoError(t, err)
require.Greater(t, len(tree.Files), 5, "at %s", url)
require.Greater(t, len(tree.Directories), 2, "at %s", url)
info, err := GetInfoRefs(url)
require.NoError(t, err)
require.Equal(t, info.Refs["refs/heads/master"], commit.Hash, "at %s", url)
})
}
}
func TestGetSpecificObject(t *testing.T) {
url := "https://codeberg.org/dluvian/gitplaza.git"
hash := "0f9438a8fd68594cd663fb8dbd23c5f5139f5263" // shell.nix
blob, err := GetObject(url, hash)
require.NoError(t, err)
require.NotNil(t, blob)
require.Equal(t, ObjectTypeBlob, blob.Type)
expected := "(builtins.getFlake\n (\"git+file://\" + toString ./.)).devShells.${builtins.currentSystem}.default\n"
require.Equal(t, expected, string(blob.Data))
}
func TestGetNonExistentCommit(t *testing.T) {
urls := []string{
"https://codeberg.org/dluvian/gitplaza.git",
"https://pyramid.fiatjaf.com/npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6/nostrlib.git",
"https://github.com/fiatjaf/nak.git",
}
commit := "1d4438a8fd68594cd663fb8dbd23c5f5139fabcd" // doesn't exist
for _, url := range urls {
t.Run(url, func(t *testing.T) {
_, err := GetDirectoryTreeAt(url, commit, nil)
require.Error(t, err)
var missingRef *MissingRef
require.ErrorAs(t, err, &missingRef)
})
}
}
func TestFetchListOfCommits(t *testing.T) {
commits, err := FetchCommitsOnly(
"https://pyramid.fiatjaf.com/npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6/nostrlib.git",
"refs/heads/master",
nil,
)
require.NoError(t, err)
require.Greater(t, len(commits), 10)
}
func TestFetch10PastCommits(t *testing.T) {
maxCommits := 10
commits, err := FetchCommitsOnly(
"https://github.com/fiatjaf/pyramid.git",
"57712756e37d7c60d1ac53e0f6b59e9ecad67c9a",
&maxCommits,
)
require.NoError(t, err)
require.Len(t, commits, 10)
c := commits[1]
require.Equal(t, "49c1b48f5120bad4089535a190d2233c96188fa2", c.Hash)
require.Equal(t, "286786a6f1072a2ef5ae057fbb611858b8e88bc4", c.Tree)
require.Equal(t, []string{"1599e46c0ee6f460e25048880754868d4f9644fd"}, c.Parents)
require.Equal(t, "fiatjaf", c.Author.Name)
require.Equal(t, "fiatjaf@gmail.com", c.Author.Email)
require.Equal(t, int64(1767157644), c.Author.Timestamp)
require.Equal(t, "-0300", c.Author.Timezone)
require.Equal(t, "fiatjaf", c.Committer.Name)
require.Equal(t, "fiatjaf@gmail.com", c.Committer.Email)
require.Equal(t, int64(1767157724), c.Committer.Timestamp)
require.Equal(t, "-0300", c.Committer.Timezone)
require.Equal(t, "scheduled events.\n", c.Message)
expectedMsg5 := "turn off groups logic on QueryStore and PreventBroadcast when groups is turned off.\n\nthis was causing crashes that Golang's bizarre iter API showed as happening inside SortedMerge.\n"
require.Equal(t, expectedMsg5, commits[5].Message)
}
func TestGetSingleCommit(t *testing.T) {
url := "https://github.com/fiatjaf/pyramid.git"
commit, err := GetSingleCommit(url, "5e982dd1122a0bb1b0154c222ec4ba841f3820c6")
require.NoError(t, err)
require.Equal(t, "5e982dd1122a0bb1b0154c222ec4ba841f3820c6", commit.Hash)
require.Equal(t, "fiatjaf", commit.Author.Name)
require.Equal(t, "validate incoming git-related stuff.\n", commit.Message)
}
func TestGetDirectoryTreeWithDepthLimit(t *testing.T) {
url := "https://github.com/fiatjaf/pyramid.git"
fullTree, err := GetDirectoryTreeAt(url, "refs/heads/master", nil)
require.NoError(t, err)
depth := 1
shallowTree, err := GetDirectoryTreeAt(url, "refs/heads/master", &depth)
require.NoError(t, err)
require.Equal(t, len(fullTree.Directories), len(shallowTree.Directories))
for _, dir := range shallowTree.Directories {
require.NotNil(t, dir.Content, "directory %q content should not be nil at depth 1", dir.Name)
for _, file := range dir.Content.Files {
require.NotEmpty(t, file.Name)
require.Nil(t, file.Content, "file %q content should be nil", file.Name)
}
for _, subdir := range dir.Content.Directories {
require.NotEmpty(t, subdir.Name)
require.Nil(t, subdir.Content, "subdir %q content should be nil at depth 1", subdir.Name)
}
}
require.Equal(t, len(fullTree.Files), len(shallowTree.Files))
}
func TestGetObjectByPathExistingFile(t *testing.T) {
url := "https://codeberg.org/dluvian/gitplaza.git"
entry, err := GetObjectByPath(url, "refs/heads/master", "README.md")
require.NoError(t, err)
require.NotNil(t, entry)
require.Equal(t, "README.md", entry.Path)
require.False(t, entry.IsDir)
require.Equal(t, "100644", entry.Mode)
require.NotEmpty(t, entry.Hash)
}
func TestGetObjectByPathExistingDirectory(t *testing.T) {
url := "https://codeberg.org/dluvian/gitplaza.git"
entry, err := GetObjectByPath(url, "refs/heads/master", "src")
require.NoError(t, err)
require.NotNil(t, entry)
require.Equal(t, "src", entry.Path)
require.True(t, entry.IsDir)
require.Equal(t, "40000", entry.Mode)
require.NotEmpty(t, entry.Hash)
}
func TestGetObjectByPathNestedFile(t *testing.T) {
url := "https://github.com/fiatjaf/pyramid.git"
entry, err := GetObjectByPath(url, "d567c18cd5c144a58b0214216f454b3caf49d4ff", "grasp/grasp.templ")
require.NoError(t, err)
require.NotNil(t, entry)
require.Equal(t, "grasp.templ", entry.Path)
require.False(t, entry.IsDir)
require.Equal(t, "100644", entry.Mode)
require.Equal(t, "05bce14339ece5f48c670d0592faa8dece9e8957", entry.Hash)
}
func TestGetObjectByPathNonExistent(t *testing.T) {
url := "https://codeberg.org/dluvian/gitplaza.git"
entry, err := GetObjectByPath(url, "refs/heads/master", "whatever/something/x/y/z/non-existent-file.txt")
require.NoError(t, err)
require.Nil(t, entry)
}
func TestGetCommitDiff(t *testing.T) {
url := "https://github.com/smallhelm/diff-lines.git"
diff, err := GetCommitDiff(url, "a73592653fe9d01f948ca3035e088e45f722eca7")
require.NoError(t, err)
require.NotNil(t, diff)
require.Len(t, diff, 5)
byPath := make(map[string]DiffFile, len(diff))
for _, f := range diff {
byPath[f.Path] = f
}
for _, tc := range []struct {
path string
status string
}{
{".travis.yml", "added"},
{".gitignore", "added"},
{"index.js", "added"},
{"tests.js", "added"},
{"package.json", "changed"},
} {
f, ok := byPath[tc.path]
require.True(t, ok, "missing diff file %q", tc.path)
require.Equal(t, tc.status, f.Status, "%s status", tc.path)
}
gitignore, ok := byPath[".gitignore"]
require.True(t, ok, "missing .gitignore in diff")
require.Equal(t, "/node_modules\n", string(gitignore.Content))
pkg, ok := byPath["package.json"]
require.True(t, ok, "missing package.json in diff")
require.NotEmpty(t, pkg.Lines, "package.json should have diff lines")
normalizeLineStatus := func(status string) string {
if status == "same" {
return "changed"
}
return status
}
lineByIndex := make(map[int]DiffLine, len(pkg.Lines))
for _, line := range pkg.Lines {
lineByIndex[line.Index] = line
}
expectedLines := []struct {
Index int
Status string
Text string
}{
{Index: 22, Status: "added", Text: " },"},
{Index: 23, Status: "added", Text: " \"homepage\": \"https://github.com/smallhelm/diff-lines#readme\","},
{Index: 24, Status: "added", Text: " \"devDependencies\": {"},
{Index: 25, Status: "added", Text: " \"tape\": \"^4.6.0\""},
{Index: 27, Status: "added", Text: " \"dependencies\": {"},
{Index: 28, Status: "added", Text: " \"diff\": \"^2.2.3\""},
{Index: 29, Status: "changed", Text: " }"},
}
actualLines := make([]struct {
Index int
Status string
Text string
}, 0, len(expectedLines))
for _, expected := range expectedLines {
line, ok := lineByIndex[expected.Index]
require.True(t, ok, "missing package.json diff line %d", expected.Index)
actualLines = append(actualLines, struct {
Index int
Status string
Text string
}{
Index: line.Index,
Status: normalizeLineStatus(line.Status),
Text: line.Text,
})
}
require.Equal(t, expectedLines, actualLines)
}
+154
View File
@@ -0,0 +1,154 @@
package gitnaturalapi
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"strings"
)
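// NecessaryCapabilities are always included in the want request and must be advertised by the server.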
var NecessaryCapabilities = []string{
"multi_ack_detailed",
"side-band-64k",
}
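// RequiredCapabilities must be advertised by the server but are not echoed back in the request.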
var RequiredCapabilities = []string{
"shallow",
"object-format=sha1",
}
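// DefaultCapabilities are requested opportunistically whenever the server advertises them.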
var DefaultCapabilities = []string{
"ofs-delta",
"no-progress",
}
type MissingRef struct{}
func (e *MissingRef) Error() string { return "missing ref" }
type InvalidCommit struct {
Commit string
}
func (e *InvalidCommit) Error() string {
return fmt.Sprintf("invalid commit '%s', must be 20 byte hex", e.Commit)
}
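// FetchPackfile POSTs the want request to git-upload-pack, skips the pkt-line
// acknowledgement preamble up to the NAK, then strips the side-band framing
// (band 1 carries pack data, band 2 progress messages) before parsing.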
func FetchPackfile(url string, want string) (*PackfileResult, error) {
req, err := http.NewRequest("POST", url+"/git-upload-pack", strings.NewReader(want))
if err != nil {
return nil, fmt.Errorf("failed to create git-upload-pack request: %w", err)
}
req.Header.Set("Content-Type", "application/x-git-upload-pack-request")
req.Header.Set("Accept", "application/x-git-upload-pack-result")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to call git-upload-pack: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("failed to call git-upload-pack: %s", string(body))
}
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read git-upload-pack response: %w", err)
}
if len(data) == 0 {
return nil, fmt.Errorf("empty response")
}
offset := 0
for offset < len(data) {
prev := offset
if prev+1 >= len(data) {
break
}
nlIdx := bytes.IndexByte(data[prev+1:], '\n')
if nlIdx == -1 {
if len(data) >= 32 && string(data[4:32]) == "ERR upload-pack: not our ref" {
return nil, &MissingRef{}
}
end := len(data)
if end > 63 {
end = 63
}
return nil, fmt.Errorf("unexpected '%s'", string(data[:end]))
}
offset = prev + nlIdx + 1
if offset >= 3 && string(data[offset-3:offset]) == "NAK" {
break
}
}
offset++ // step past the '\n' that terminated the NAK line
var packfileData []byte
for offset < len(data) {
if offset+5 > len(data) {
break
}
pktLen, err := strconv.ParseInt(string(data[offset:offset+4]), 16, 32)
if err != nil {
break
}
length := int(pktLen)
if length == 0 {
break
}
if offset+length > len(data) {
break
}
if data[offset+4] == 2 {
// progress message, ignore
} else if data[offset+4] == 1 {
packfileData = append(packfileData, data[offset+5:offset+length]...)
}
offset += length
}
if len(packfileData) == 0 {
return nil, &MissingRef{}
}
return ParsePackfile(packfileData)
}
func CreateWantRequest(commitSha string, capabilities []string, deepen *int, filter string) (string, error) {
if len(commitSha) != 40 {
return "", &InvalidCommit{Commit: commitSha}
}
var buf strings.Builder
wantLine := fmt.Sprintf("want %s %s agent=nsa/1.0.0\n", commitSha, strings.Join(capabilities, " "))
buf.WriteString(pktEncode(wantLine))
if deepen != nil {
deepenLine := fmt.Sprintf("deepen %d\n", *deepen)
buf.WriteString(pktEncode(deepenLine))
}
if filter != "" {
filterLine := fmt.Sprintf("filter %s\n", filter)
buf.WriteString(pktEncode(filterLine))
}
buf.WriteString("0000")
buf.WriteString(pktEncode("done\n"))
return buf.String(), nil
}
func pktEncode(data string) string {
if len(data) == 0 {
return "0000"
}
length := len(data) + 4
return fmt.Sprintf("%04x%s", length, data)
}
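// Worked example: pktEncode("done\n") returns "0009done\n" -- the payload is 5
// bytes, plus 4 for the hex length prefix itself, written as four hex digits.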
+307
View File
@@ -0,0 +1,307 @@
package gitnaturalapi
import (
"bytes"
"compress/zlib"
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
)
const (
ObjectTypeCommit = 1
ObjectTypeTree = 2
ObjectTypeBlob = 3
ObjectTypeTag = 4
ObjectTypeOfsDelta = 6
ObjectTypeRefDelta = 7
)
type ParsedObject struct {
Type int
Size int
Data []byte
Offset int
Hash string
}
type PackfileResult struct {
Version int
Count int
Objects map[string]*ParsedObject
}
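// ParsePackfile decodes a version-2 packfile: a 12-byte header ("PACK", the
// version, the object count) followed by that many zlib-compressed objects,
// some of which may be deltas against earlier objects.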
func ParsePackfile(data []byte) (*PackfileResult, error) {
if len(data) < 12 {
return nil, fmt.Errorf("packfile too short")
}
header := string(data[0:4])
if header != "PACK" {
return nil, fmt.Errorf("invalid packfile header: %s", header)
}
version := int(binary.BigEndian.Uint32(data[4:8]))
if version != 2 {
return nil, fmt.Errorf("unsupported packfile version: %d", version)
}
count := int(binary.BigEndian.Uint32(data[8:12]))
objects := make(map[string]*ParsedObject)
pos := 12
for i := 0; i < count; i++ {
obj, newPos, err := parsePackObject(data, pos, objects)
if err != nil {
return nil, fmt.Errorf("error parsing object %d/%d: %w", i+1, count, err)
}
objects[obj.Hash] = obj
pos = newPos
}
return &PackfileResult{
Version: version,
Count: count,
Objects: objects,
}, nil
}
func parsePackObject(data []byte, startPos int, objects map[string]*ParsedObject) (*ParsedObject, int, error) {
pos := startPos
offset := startPos
b := data[pos]
pos++
objType := int((b >> 4) & 0x07)
size := int(b & 0x0f)
shift := 4
for b&0x80 != 0 {
b = data[pos]
pos++
size |= int(b&0x7f) << shift
shift += 7
}
var objData []byte
var err error
switch objType {
case ObjectTypeOfsDelta:
var actualType int
objData, pos, actualType, err = parseOfsDelta(data, pos, offset, objects)
if err != nil {
return nil, 0, err
}
objType = actualType
case ObjectTypeRefDelta:
var actualType int
objData, pos, actualType, err = parseRefDelta(data, pos, objects)
if err != nil {
return nil, 0, err
}
objType = actualType
case ObjectTypeCommit, ObjectTypeTree, ObjectTypeBlob, ObjectTypeTag:
objData, pos, err = zlibDecompress(data, pos)
if err != nil {
return nil, 0, err
}
default:
return nil, 0, fmt.Errorf("unknown object type: %d", objType)
}
hash, err := computeObjectHash(objType, objData)
if err != nil {
return nil, 0, err
}
return &ParsedObject{
Type: objType,
Size: size,
Data: objData,
Offset: offset,
Hash: hash,
}, pos, nil
}
func parseOfsDelta(data []byte, pos int, currentOffset int, objects map[string]*ParsedObject) ([]byte, int, int, error) {
b := data[pos]
pos++
offset := int(b & 0x7f)
for b&0x80 != 0 {
offset++
offset <<= 7
b = data[pos]
pos++
offset += int(b & 0x7f)
}
baseOffset := currentOffset - offset
baseObject, _, err := parsePackObject(data, baseOffset, objects)
if err != nil {
return nil, 0, 0, fmt.Errorf("failed to parse base object at offset %d: %w", baseOffset, err)
}
delta, newPos, err := zlibDecompress(data, pos)
if err != nil {
return nil, 0, 0, err
}
fullObj, err := applyDelta(delta, baseObject.Data)
if err != nil {
return nil, 0, 0, err
}
return fullObj, newPos, baseObject.Type, nil
}
func parseRefDelta(data []byte, pos int, objects map[string]*ParsedObject) ([]byte, int, int, error) {
baseName := hex.EncodeToString(data[pos : pos+20])
pos += 20
delta, newPos, err := zlibDecompress(data, pos)
if err != nil {
return nil, 0, 0, err
}
baseObject, ok := objects[baseName]
if !ok {
return nil, 0, 0, fmt.Errorf("base object not found with name %s", baseName)
}
fullObj, err := applyDelta(delta, baseObject.Data)
if err != nil {
return nil, 0, 0, err
}
return fullObj, newPos, baseObject.Type, nil
}
func computeObjectHash(objType int, data []byte) (string, error) {
var typeStr string
switch objType {
case ObjectTypeCommit:
typeStr = "commit"
case ObjectTypeTree:
typeStr = "tree"
case ObjectTypeBlob:
typeStr = "blob"
case ObjectTypeTag:
typeStr = "tag"
default:
return "", fmt.Errorf("unknown type when computing object hash: %d", objType)
}
header := fmt.Sprintf("%s %d\x00", typeStr, len(data)) // git's object-id preimage header
h := sha1.New()
h.Write([]byte(header))
h.Write(data)
return hex.EncodeToString(h.Sum(nil)), nil
}
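// applyDelta materializes a delta-encoded object: after two varint sizes, each
// command either copies an (offset, size) span out of the base object or
// inserts literal bytes taken from the delta stream itself.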
func applyDelta(delta []byte, base []byte) ([]byte, error) {
pos := 0
_, bytesRead := readVariableInt(delta, pos)
pos += bytesRead
resultSize, bytesRead := readVariableInt(delta, pos)
pos += bytesRead
result := make([]byte, resultSize)
resultOffset := 0
for pos < len(delta) {
cmd := delta[pos]
pos++
if cmd&0x80 != 0 {
var copyOffset, copySize int
if cmd&0x01 != 0 {
copyOffset = int(delta[pos])
pos++
}
if cmd&0x02 != 0 {
copyOffset |= int(delta[pos]) << 8
pos++
}
if cmd&0x04 != 0 {
copyOffset |= int(delta[pos]) << 16
pos++
}
if cmd&0x08 != 0 {
copyOffset |= int(delta[pos]) << 24
pos++
}
if cmd&0x10 != 0 {
copySize = int(delta[pos])
pos++
}
if cmd&0x20 != 0 {
copySize |= int(delta[pos]) << 8
pos++
}
if cmd&0x40 != 0 {
copySize |= int(delta[pos]) << 16
pos++
}
if copySize == 0 {
copySize = 0x10000
}
copy(result[resultOffset:], base[copyOffset:copyOffset+copySize])
resultOffset += copySize
} else if cmd > 0 {
copy(result[resultOffset:], delta[pos:pos+int(cmd)])
pos += int(cmd)
resultOffset += int(cmd)
} else {
return nil, fmt.Errorf("invalid delta command")
}
}
return result, nil
}
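// zlibDecompress inflates the stream starting at pos and reports where
// it ended: bytes.Reader implements io.ByteReader, so the flate decoder
// consumes it directly without read-ahead buffering, making
// len(data)-br.Len() an exact end position.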
func zlibDecompress(data []byte, pos int) ([]byte, int, error) {
br := bytes.NewReader(data[pos:])
r, err := zlib.NewReader(br)
if err != nil {
return nil, 0, fmt.Errorf("zlib init error: %w", err)
}
decompressed, err := io.ReadAll(r)
r.Close()
if err != nil {
return nil, 0, fmt.Errorf("zlib decompress error: %w", err)
}
newPos := len(data) - br.Len()
return decompressed, newPos, nil
}
func readVariableInt(data []byte, pos int) (int, int) {
value := 0
shift := 0
bytesRead := 0
for {
b := data[pos]
pos++
bytesRead++
value |= int(b&0x7f) << shift
shift += 7
if b&0x80 == 0 {
break
}
}
return value, bytesRead
}
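To make the command encoding concrete, here is a minimal sketch that feeds applyDelta a hand-assembled delta (the byte values are illustrative, not taken from a real pack, and the functions above are assumed to be in scope):

func demoApplyDelta() {
	base := []byte("hello world")
	delta := []byte{
		0x0b,       // source size varint: 11
		0x06,       // target size varint: 6
		0x90, 0x05, // copy command: MSB set, size-byte flag 0x10, copy 5 bytes from offset 0
		0x01, '!',  // insert command: MSB clear, insert 1 literal byte
	}
	out, err := applyDelta(delta, base)
	fmt.Println(string(out), err) // hello! <nil>
}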
+120
@@ -0,0 +1,120 @@
package gitnaturalapi
import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
)
type InfoRefsUploadPackResponse struct {
Refs map[string]string
Capabilities []string
Symrefs map[string]string
}
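// capabilitiesCache maps a repository URL to its []string capability
// list, so repeated GetCapabilities calls skip the info/refs round trip.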
var capabilitiesCache sync.Map
func GetCapabilities(url string, existingInfo *InfoRefsUploadPackResponse) ([]string, error) {
if existingInfo != nil {
capabilitiesCache.Store(url, existingInfo.Capabilities)
return existingInfo.Capabilities, nil
}
if cached, ok := capabilitiesCache.Load(url); ok {
return cached.([]string), nil
}
info, err := GetInfoRefs(url)
if err != nil {
return nil, err
}
capabilitiesCache.Store(url, info.Capabilities)
return info.Capabilities, nil
}
func GetInfoRefs(url string) (*InfoRefsUploadPackResponse, error) {
resp, err := http.Get(url + "/info/refs?service=git-upload-pack")
if err != nil {
return nil, fmt.Errorf("failed to fetch info/refs: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read info/refs response: %w", err)
}
response := string(body)
result := &InfoRefsUploadPackResponse{
Refs: make(map[string]string),
Symrefs: make(map[string]string),
}
lines := strings.Split(response, "\n")
firstRef := true
for _, line := range lines {
if len(line) == 0 {
continue
}
if strings.HasPrefix(line, "0000") {
line = line[4:]
}
if len(line) < 4 {
continue
}
length, err := strconv.ParseInt(line[:4], 16, 32)
if err != nil {
continue
}
endIdx := int(length)
if endIdx > len(line) {
endIdx = len(line)
}
if endIdx <= 4 {
continue
}
content := line[4:endIdx]
if firstRef && strings.HasPrefix(content, "# service=") {
firstRef = false
continue
}
if !strings.Contains(content, " ") {
continue
}
parts := strings.SplitN(content, " ", 2)
hash := parts[0]
refAndCaps := parts[1]
if strings.Contains(refAndCaps, "\x00") {
nulParts := strings.SplitN(refAndCaps, "\x00", 2)
ref := strings.TrimSpace(nulParts[0])
result.Refs[ref] = hash
caps := strings.Fields(nulParts[1])
result.Capabilities = caps
for _, cap := range caps {
if strings.HasPrefix(cap, "symref=") {
symrefData := cap[7:]
colonIdx := strings.Index(symrefData, ":")
if colonIdx != -1 {
result.Symrefs[symrefData[:colonIdx]] = symrefData[colonIdx+1:]
}
}
}
} else {
result.Refs[strings.TrimSpace(refAndCaps)] = hash
}
}
return result, nil
}
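A usage sketch, assuming a reachable smart-HTTP remote (the URL is a placeholder):

func demoInfoRefs() {
	info, err := GetInfoRefs("https://git.example.com/some/repo")
	if err != nil {
		panic(err)
	}
	for ref, hash := range info.Refs {
		fmt.Println(hash, ref)
	}
	if target, ok := info.Symrefs["HEAD"]; ok {
		fmt.Println("HEAD points to", target)
	}
}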
+101
@@ -0,0 +1,101 @@
package gitnaturalapi
import "encoding/hex"
type TreeEntry struct {
Path string
Mode string
IsDir bool
Hash string
}
type TreeFile struct {
Name string
Hash string
Content []byte
}
type TreeDirectory struct {
Name string
Hash string
Content *Tree
}
type Tree struct {
Directories []TreeDirectory
Files []TreeFile
}
func LoadTree(obj *ParsedObject, objects map[string]*ParsedObject, depth *int) *Tree {
directories := make([]TreeDirectory, 0)
files := make([]TreeFile, 0)
entries := ParseTree(obj.Data)
for _, entry := range entries {
child := objects[entry.Hash]
if entry.IsDir {
var content *Tree
if child != nil && (depth == nil || *depth > 0) {
var newDepth *int
if depth != nil {
d := *depth - 1
newDepth = &d
}
content = LoadTree(child, objects, newDepth)
}
directories = append(directories, TreeDirectory{
Name: entry.Path,
Hash: entry.Hash,
Content: content,
})
} else {
var content []byte
if child != nil {
content = child.Data
}
files = append(files, TreeFile{
Name: entry.Path,
Hash: entry.Hash,
Content: content,
})
}
}
return &Tree{Directories: directories, Files: files}
}
func ParseTree(treeData []byte) []TreeEntry {
entries := make([]TreeEntry, 0)
offset := 0
for offset < len(treeData) {
modeEnd := offset
for treeData[modeEnd] != 0x20 {
modeEnd++
}
mode := string(treeData[offset:modeEnd])
offset = modeEnd + 1
filenameEnd := offset
for treeData[filenameEnd] != 0x00 {
filenameEnd++
}
path := string(treeData[offset:filenameEnd])
offset = filenameEnd + 1
hash := hex.EncodeToString(treeData[offset : offset+20])
offset += 20
isDir := mode == "40000" || mode == "040000"
entries = append(entries, TreeEntry{
Mode: mode,
Path: path,
Hash: hash,
IsDir: isDir,
})
}
return entries
}
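A sketch of the raw entry layout ParseTree walks ("<mode> <name>\x00" followed by a 20-byte hash); the zero hash is a placeholder and fmt is assumed to be imported:

func demoParseTree() {
	raw := append([]byte("100644 README.md\x00"), make([]byte, 20)...)
	entries := ParseTree(raw)
	fmt.Println(entries[0].Mode, entries[0].Path, entries[0].IsDir) // 100644 README.md false
}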
+25
@@ -0,0 +1,25 @@
package grasp
import (
"net/url"
"strings"
"fiatjaf.com/nostr/nip19"
)
func IsGraspURL(u string) bool {
parsed, err := url.Parse(u)
if err != nil {
return false
}
if strings.Count(parsed.Path, "/") != 2 || len(parsed.Path) < 65 {
return false
}
if prefix, _, err := nip19.Decode(parsed.Path[1:64]); err != nil || prefix != "npub" {
return false
}
return true
}
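Two quick illustrations of the check (the relay host is a placeholder; the npub is the test key used elsewhere in this diff):

IsGraspURL("wss://relay.example.com/npub1ye5ptcxfyyxl5vjvdjar2ua3f0hynkjzpx552mu5snj3qmx5pzjscpknpr/repo") // true
IsGraspURL("https://example.com/not-a-grasp-url")                                                          // false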
+1 -1
@@ -112,7 +112,7 @@ func NewBunker(
onAuth func(string),
) *BunkerClient {
if pool == nil {
pool = nostr.NewPool(nostr.PoolOptions{})
pool = nostr.NewPool()
}
clientPublicKey := nostr.GetPublicKey(clientSecretKey)
+1 -1
@@ -67,7 +67,7 @@ func NewBunkerFromNostrConnect(
pool *nostr.Pool,
) (*BunkerClient, error) {
if pool == nil {
pool = nostr.NewPool(nostr.PoolOptions{})
pool = nostr.NewPool()
}
if len(relayURLs) == 0 {
+12 -8
@@ -11,20 +11,24 @@ import (
)
func NormalizeIdentifier(name string) string {
name = strings.TrimSpace(strings.ToLower(name))
res, _, _ := transform.Bytes(norm.NFKC, []byte(name))
runes := []rune(string(res))
runes := []rune(strings.ToLower(string(res)))
b := make([]rune, len(runes))
for i, letter := range runes {
words := make([]string, 0, 3)
word := make([]rune, 0, 12)
for _, letter := range runes {
if unicode.IsLetter(letter) || unicode.IsNumber(letter) {
b[i] = letter
} else {
b[i] = '-'
word = append(word, letter)
} else if len(word) > 0 {
words = append(words, string(word))
word = make([]rune, 0, 12)
}
}
if len(word) > 0 {
words = append(words, string(word))
}
return string(b)
return strings.Join(words, "-")
}
func ArticleAsHTML(content string) string {
+1 -1
@@ -13,7 +13,7 @@ func TestNormalization(t *testing.T) {
}{
{" hello ", "hello"},
{"Goodbye", "goodbye"},
{"the long and winding road / that leads to your door", "the-long-and-winding-road---that-leads-to-your-door"},
{"the long and winding road / that leads to your door", "the-long-and-winding-road-that-leads-to-your-door"},
{"it's 平仮名", "it-s-平仮名"},
} {
if norm := NormalizeIdentifier(vector.before); norm != vector.after {
+38
@@ -0,0 +1,38 @@
package nip5a
import (
"fmt"
"math/big"
"strings"
"fiatjaf.com/nostr"
)
func NormalizePath(p string) string {
if strings.HasSuffix(p, "/") {
return p + "index.html"
}
return p
}
func PubKeyFromBase36(value string) (nostr.PubKey, error) {
bi, ok := new(big.Int).SetString(value, 36)
if !ok {
return nostr.ZeroPK, fmt.Errorf("invalid base36 pubkey")
}
buf := bi.Bytes()
if len(buf) > 32 {
return nostr.ZeroPK, fmt.Errorf("base36 pubkey too long")
}
var pk nostr.PubKey
copy(pk[32-len(buf):], buf)
return pk, nil
}
func PubKeyToBase36(pubkey nostr.PubKey) string {
value := new(big.Int).SetBytes(pubkey[:]).Text(36)
return strings.Repeat("0", 50-len(value)) + value
}
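A round-trip sketch using the test pubkey that appears later in this diff; 50 digits always suffice because 36^50 > 2^256, so any 32-byte key fits:

func demoBase36() {
	pk := nostr.MustPubKeyFromHex("266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5")
	b36 := PubKeyToBase36(pk) // exactly 50 characters, left-padded with '0'
	back, err := PubKeyFromBase36(b36)
	fmt.Println(len(b36), back == pk, err) // 50 true <nil>
}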
+149
@@ -0,0 +1,149 @@
package nip5a
import (
"encoding/hex"
"fmt"
"regexp"
"strings"
"unsafe"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip19"
)
type SiteManifest struct {
Event *nostr.Event
Pubkey nostr.PubKey
Root bool
Identifier string
Paths map[string][32]byte
Servers []string
Title string
Description string
Source string
}
func ParseSiteManifest(event *nostr.Event) (*SiteManifest, error) {
sm := &SiteManifest{Event: event}
switch event.Kind {
case nostr.KindNsiteRoot:
sm.Root = true
case nostr.KindNsiteNamed:
sm.Root = false
for _, tag := range event.Tags {
if len(tag) >= 2 && tag[0] == "d" {
sm.Identifier = tag[1]
break
}
}
if sm.Identifier == "" {
return nil, fmt.Errorf("named site manifest missing d tag")
}
default:
return nil, fmt.Errorf("invalid site manifest kind: %d", event.Kind)
}
sm.Pubkey = event.PubKey
sm.Paths = make(map[string][32]byte, len(event.Tags))
for _, tag := range event.Tags {
if len(tag) < 2 {
continue
}
switch tag[0] {
case "path":
var hash [32]byte
if len(tag[2]) != 64 {
return nil, fmt.Errorf("invalid hash '%s' for path '%s'", tag[2], tag[1])
}
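// decode the hex digest straight out of the string's backing bytes,
// skipping the []byte(tag[2]) copy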
if _, err := hex.Decode(hash[:], unsafe.Slice(unsafe.StringData(tag[2]), 64)); err != nil {
return nil, fmt.Errorf("invalid hash '%s' for path '%s'", tag[2], tag[1])
}
sm.Paths[NormalizePath(tag[1])] = hash
case "server":
sm.Servers = append(sm.Servers, tag[1])
case "title":
sm.Title = tag[1]
case "description":
sm.Description = tag[1]
case "source":
sm.Source = tag[1]
}
}
if len(sm.Paths) == 0 {
return sm, fmt.Errorf("nsite has zero paths listed")
}
return sm, nil
}
func (sm SiteManifest) ToEvent() nostr.Event {
event := nostr.Event{
PubKey: sm.Pubkey,
CreatedAt: nostr.Now(),
Tags: nostr.Tags{},
}
if sm.Root {
event.Kind = nostr.KindNsiteRoot
} else {
event.Kind = nostr.KindNsiteNamed
event.Tags = append(event.Tags, nostr.Tag{"d", sm.Identifier})
}
for path, hash := range sm.Paths {
event.Tags = append(event.Tags, nostr.Tag{"path", NormalizePath(path), hex.EncodeToString(hash[:])})
}
for _, s := range sm.Servers {
event.Tags = append(event.Tags, nostr.Tag{"server", s})
}
if sm.Title != "" {
event.Tags = append(event.Tags, nostr.Tag{"title", sm.Title})
}
if sm.Description != "" {
event.Tags = append(event.Tags, nostr.Tag{"description", sm.Description})
}
if sm.Source != "" {
event.Tags = append(event.Tags, nostr.Tag{"source", sm.Source})
}
return event
}
func (sm *SiteManifest) GetHashForPath(path string) ([32]byte, bool) {
path = NormalizePath(path)
hash, ok := sm.Paths[path]
return hash, ok
}
func DecodeSiteURL(label string) (pubkey nostr.PubKey, identifier string, isRoot bool, err error) {
label, _, _ = strings.Cut(label, ".")
if strings.HasPrefix(label, "npub1") {
_, value, err := nip19.Decode(label)
if err != nil {
return nostr.ZeroPK, "", false, err
}
return value.(nostr.PubKey), "", true, nil
}
if len(label) < 51 || len(label) > 63 || strings.HasSuffix(label, "-") {
return nostr.ZeroPK, "", false, fmt.Errorf("invalid site label format")
}
pubkeyB36 := label[:50]
dTag := label[50:]
if !regexp.MustCompile(`^[a-z0-9-]{1,13}$`).MatchString(dTag) {
return nostr.ZeroPK, "", false, fmt.Errorf("invalid dtag format")
}
pk, err := PubKeyFromBase36(pubkeyB36)
if err != nil {
return nostr.ZeroPK, "", false, err
}
return pk, dTag, false, nil
}
+237
@@ -0,0 +1,237 @@
package nip5a
import (
"encoding/hex"
"testing"
"fiatjaf.com/nostr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestParseSiteManifest(t *testing.T) {
pubkey := nostr.MustPubKeyFromHex("266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5")
t.Run("root site", func(t *testing.T) {
event := &nostr.Event{
Kind: nostr.KindNsiteRoot,
PubKey: pubkey,
Tags: nostr.Tags{
{"path", "/index.html", "186ea5fd14e88fd1ac49351759e7ab906fa94892002b60bf7f5a428f28ca1c99"},
{"path", "/about.html", "a1b2c3d4e5f6789012345678901234567890abcdef1234567890abcdef123456"},
{"path", "/favicon.ico", "fedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"},
{"server", "https://blossom.example.com"},
{"title", "My Nostr Site"},
{"description", "A static website hosted on Nostr"},
{"source", "https://github.com/example/my-nostr-site"},
},
}
sm, err := ParseSiteManifest(event)
require.NoError(t, err)
assert.True(t, sm.Root)
assert.Equal(t, pubkey, sm.Pubkey)
assert.Equal(t, "My Nostr Site", sm.Title)
assert.Equal(t, "A static website hosted on Nostr", sm.Description)
assert.Equal(t, "https://github.com/example/my-nostr-site", sm.Source)
assert.Len(t, sm.Paths, 3)
assert.Len(t, sm.Servers, 1)
assert.Equal(t, "https://blossom.example.com", sm.Servers[0])
})
t.Run("named site", func(t *testing.T) {
event := &nostr.Event{
Kind: nostr.KindNsiteNamed,
PubKey: pubkey,
Tags: nostr.Tags{
{"d", "blog"},
{"path", "/index.html", "186ea5fd14e88fd1ac49351759e7ab906fa94892002b60bf7f5a428f28ca1c99"},
{"path", "/post.html", "a1b2c3d4e5f6789012345678901234567890abcdef1234567890abcdef123456"},
{"server", "https://blossom.example.com"},
{"title", "My Blog"},
{"description", "A blog hosted on Nostr"},
{"source", "https://github.com/example/my-nostr-blog"},
},
}
sm, err := ParseSiteManifest(event)
require.NoError(t, err)
assert.False(t, sm.Root)
assert.Equal(t, "blog", sm.Identifier)
assert.Equal(t, pubkey, sm.Pubkey)
assert.Equal(t, "My Blog", sm.Title)
})
t.Run("missing d tag on named site", func(t *testing.T) {
event := &nostr.Event{
Kind: nostr.KindNsiteNamed,
PubKey: pubkey,
Tags: nostr.Tags{
{"path", "/index.html", "186ea5fd14e88fd1ac49351759e7ab906fa94892002b60bf7f5a428f28ca1c99"},
},
}
_, err := ParseSiteManifest(event)
assert.Error(t, err)
assert.Contains(t, err.Error(), "missing d tag")
})
t.Run("invalid kind", func(t *testing.T) {
event := &nostr.Event{
Kind: 1,
PubKey: pubkey,
Tags: nostr.Tags{},
}
_, err := ParseSiteManifest(event)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid site manifest kind")
})
}
func TestGetHashForPath(t *testing.T) {
pubkey := nostr.MustPubKeyFromHex("266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5")
event := &nostr.Event{
Kind: nostr.KindNsiteRoot,
PubKey: pubkey,
Tags: nostr.Tags{
{"path", "/index.html", "186ea5fd14e88fd1ac49351759e7ab906fa94892002b60bf7f5a428f28ca1c99"},
{"path", "/about.html", "a1b2c3d4e5f6789012345678901234567890abcdef1234567890abcdef123456"},
},
}
sm, err := ParseSiteManifest(event)
require.NoError(t, err)
hash, ok := sm.GetHashForPath("/index.html")
assert.True(t, ok)
assert.Equal(t, "186ea5fd14e88fd1ac49351759e7ab906fa94892002b60bf7f5a428f28ca1c99", hex.EncodeToString(hash[:]))
_, ok = sm.GetHashForPath("/nonexistent.html")
assert.False(t, ok)
}
func TestNormalizePath(t *testing.T) {
tests := []struct {
input string
expected string
}{
{"/index.html", "/index.html"},
{"/about.html", "/about.html"},
{"/blog/", "/blog/index.html"},
{"/", "/index.html"},
}
for _, test := range tests {
result := NormalizePath(test.input)
assert.Equal(t, test.expected, result)
}
}
func TestPubKeyBase36(t *testing.T) {
pubkey := nostr.MustPubKeyFromHex("266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5")
b36 := PubKeyToBase36(pubkey)
assert.Len(t, b36, 50)
decoded, err := PubKeyFromBase36(b36)
require.NoError(t, err)
assert.Equal(t, pubkey, decoded)
}
func TestDecodeSiteURL(t *testing.T) {
pubkey := nostr.MustPubKeyFromHex("266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5")
t.Run("npub root site", func(t *testing.T) {
decodedPubkey, identifier, isRoot, err := DecodeSiteURL("npub1ye5ptcxfyyxl5vjvdjar2ua3f0hynkjzpx552mu5snj3qmx5pzjscpknpr")
require.NoError(t, err)
assert.True(t, isRoot)
assert.Equal(t, "", identifier)
assert.Equal(t, decodedPubkey, pubkey)
})
t.Run("named site", func(t *testing.T) {
b36 := PubKeyToBase36(pubkey)
label := b36 + "blog"
decodedPubkey, identifier, isRoot, err := DecodeSiteURL(label)
require.NoError(t, err)
assert.False(t, isRoot)
assert.Equal(t, "blog", identifier)
assert.Equal(t, decodedPubkey, pubkey)
})
t.Run("strips domain suffix", func(t *testing.T) {
b36 := PubKeyToBase36(pubkey)
label := b36 + "blog.nsite-host.com"
_, identifier, _, err := DecodeSiteURL(label)
require.NoError(t, err)
assert.Equal(t, "blog", identifier)
})
t.Run("invalid dtag format", func(t *testing.T) {
b36 := PubKeyToBase36(pubkey)
label := b36 + "Blog"
_, _, _, err := DecodeSiteURL(label)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid dtag format")
})
t.Run("label too short", func(t *testing.T) {
_, _, _, err := DecodeSiteURL("npub1")
assert.Error(t, err)
})
t.Run("ends with dash", func(t *testing.T) {
b36 := PubKeyToBase36(pubkey)
label := b36 + "blog-"
_, _, _, err := DecodeSiteURL(label)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid site label format")
})
}
func TestSiteManifestToEvent(t *testing.T) {
pubkey := nostr.MustPubKeyFromHex("266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5")
sm := &SiteManifest{
Root: true,
Pubkey: pubkey,
Identifier: "",
Paths: map[string][32]byte{
"/index.html": mustHash("186ea5fd14e88fd1ac49351759e7ab906fa94892002b60bf7f5a428f28ca1c99"),
},
Servers: []string{"https://blossom.example.com"},
Title: "Test Site",
Description: "A test site",
Source: "https://github.com/example/test",
}
event := sm.ToEvent()
assert.Equal(t, nostr.KindNsiteRoot, event.Kind)
assert.Equal(t, pubkey, event.PubKey)
sm.Root = false
sm.Identifier = "blog"
event = sm.ToEvent()
assert.Equal(t, nostr.KindNsiteNamed, event.Kind)
found := false
for _, tag := range event.Tags {
if tag[0] == "d" && tag[1] == "blog" {
found = true
break
}
}
assert.True(t, found)
}
func mustHash(s string) [32]byte {
var h [32]byte
b, _ := hex.DecodeString(s)
copy(h[:], b)
return h
}
+40 -8
@@ -94,6 +94,11 @@ meltworked:
nil,
)
// mark tokens as reserved before attempting melt
for _, i := range chosen.tokenIndexes {
w.Tokens[i].reserved = true
}
// request from mint to _melt_ into paying the invoice
delay := 200 * time.Millisecond
// this request will block until the invoice is paid or it fails
@@ -103,17 +108,44 @@ meltworked:
Inputs: chosen.proofs,
Outputs: preChange.bm,
})
inspectmeltstatusresponse:
if err != nil || meltStatus.State == nut05.Unpaid {
return "", fmt.Errorf("error melting token: %w", err)
} else if meltStatus.State == nut05.Unknown {
return "", fmt.Errorf("we don't know what happened with the melt at %s: %v", chosen.mint, meltStatus)
} else if meltStatus.State == nut05.Pending {
for {
for {
if err != nil || meltStatus.State == nut05.Unpaid {
// unreserve tokens to available state on failure
for _, i := range chosen.tokenIndexes {
w.Tokens[i].reserved = false
}
return "", fmt.Errorf("error melting token: %w", err)
} else if meltStatus.State == nut05.Unknown {
// unreserve tokens to available state on failure
for _, i := range chosen.tokenIndexes {
w.Tokens[i].reserved = false
}
return "", fmt.Errorf("we don't know what happened with the melt at %s: %v", chosen.mint, meltStatus)
} else if meltStatus.State == nut05.Pending {
time.Sleep(delay)
delay *= 2
meltStatus, err = client.GetMeltQuoteState(ctx, chosen.mint, meltStatus.Quote)
goto inspectmeltstatusresponse
if err != nil {
// unreserve tokens to available state on failure
for _, i := range chosen.tokenIndexes {
w.Tokens[i].reserved = false
}
return "", fmt.Errorf("error checking melt status: %w", err)
}
if meltStatus.State == nut05.Unpaid || meltStatus.State == nut05.Unknown {
// unreserve tokens to available state on failure
for _, i := range chosen.tokenIndexes {
w.Tokens[i].reserved = false
}
return "", fmt.Errorf("melt failed with state %v", meltStatus.State)
} else if meltStatus.State == nut05.Paid {
// payment successful
break
}
// continue looping for pending state
continue
} else if meltStatus.State == nut05.Paid {
break
}
}
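The unreserve-on-failure block now appears four times inside this loop; a tiny helper would keep the behavior identical while removing the repetition — a sketch, assuming tokenIndexes is the index slice used above:

// hypothetical helper, not part of this diff
func (w *Wallet) unreserve(indexes []int) {
	for _, i := range indexes {
		w.Tokens[i].reserved = false
	}
}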
+3
@@ -153,6 +153,9 @@ func (w *Wallet) getProofsForSending(
) (chosenTokens, uint64, error) {
byMint := make(map[string]chosenTokens)
for t, token := range w.Tokens {
if token.reserved {
continue
}
if fromMint != "" && token.Mint != fromMint {
continue
}
+1
@@ -14,6 +14,7 @@ type Token struct {
Proofs cashu.Proofs `json:"proofs"`
Deleted []nostr.ID `json:"del,omitempty"`
reserved bool
mintedAt nostr.Timestamp
event *nostr.Event
}
+4
@@ -249,6 +249,10 @@ func (w *Wallet) removeDeletedToken(eventId nostr.ID) {
func (w *Wallet) Balance() uint64 {
var sum uint64
for _, token := range w.Tokens {
if token.reserved {
continue
}
sum += token.Proofs.Amount()
}
return sum
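With this change Balance reports only spendable funds: proofs backing tokens reserved for an in-flight melt are excluded until the melt settles or the reservation is rolled back.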

Some files were not shown because too many files have changed in this diff.