eventstore: fix lmdb and mmm ptag-kind queries.
The prefix was missing the 2 bytes of the kind when preparing the query. We also simplify the query-planner logic a little, eliminate some useless fields, and make everything clearer.
This commit is contained in:
@@ -36,18 +36,15 @@ func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) {
|
||||
// we already have a k and a v and an err from the cursor setup, so check and use these
|
||||
if it.exhausted ||
|
||||
it.err != nil ||
|
||||
len(it.key) != q.keySize ||
|
||||
len(it.key) != len(q.prefix)+4 ||
|
||||
!bytes.HasPrefix(it.key, q.prefix) {
|
||||
// either iteration has errored or we reached the end of this prefix
|
||||
break // stop this cursor and move to the next one
|
||||
}
|
||||
|
||||
// "id" indexes don't contain a timestamp
|
||||
if q.dbi != b.indexId {
|
||||
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
|
||||
if createdAt < since {
|
||||
break
|
||||
}
|
||||
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
|
||||
if createdAt < since {
|
||||
break
|
||||
}
|
||||
|
||||
if extraAuthors == nil && extraKinds == nil && extraTagValues == nil {
|
||||
@@ -129,18 +126,15 @@ func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *
|
||||
for {
|
||||
// we already have a k and a v and an err from the cursor setup, so check and use these
|
||||
if it.err != nil ||
|
||||
len(it.key) != q.keySize ||
|
||||
len(it.key) != len(q.prefix)+4 ||
|
||||
!bytes.HasPrefix(it.key, q.prefix) {
|
||||
// either iteration has errored or we reached the end of this prefix
|
||||
break // stop this cursor and move to the next one
|
||||
}
|
||||
|
||||
// "id" indexes don't contain a timestamp
|
||||
if q.dbi != b.indexId {
|
||||
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
|
||||
if createdAt < since {
|
||||
break
|
||||
}
|
||||
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
|
||||
if createdAt < since {
|
||||
break
|
||||
}
|
||||
|
||||
// fetch actual event (we need it regardless because we need the pubkey for the hll)
|
||||
|
||||
@@ -45,7 +45,7 @@ func (it *iterator) pull(n int, since uint32) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(it.key) != query.keySize || !bytes.HasPrefix(it.key, query.prefix) {
|
||||
if len(it.key) != len(query.prefix)+4 || !bytes.HasPrefix(it.key, query.prefix) {
|
||||
// we reached the end of this prefix
|
||||
it.exhausted = true
|
||||
return
|
||||
|
||||
@@ -14,7 +14,6 @@ type query struct {
|
||||
i int
|
||||
dbi lmdb.DBI
|
||||
prefix []byte
|
||||
keySize int
|
||||
startingPoint []byte
|
||||
}
|
||||
|
||||
@@ -40,10 +39,10 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
|
||||
}
|
||||
}
|
||||
for i, q := range queries {
|
||||
sp := make([]byte, len(q.prefix))
|
||||
sp = sp[0:len(q.prefix)]
|
||||
copy(sp, q.prefix)
|
||||
queries[i].startingPoint = binary.BigEndian.AppendUint32(sp, uint32(until))
|
||||
sp := make([]byte, len(q.prefix)+4)
|
||||
copy(sp[0:len(q.prefix)], q.prefix)
|
||||
binary.BigEndian.PutUint32(sp[len(q.prefix):], uint32(until))
|
||||
queries[i].startingPoint = sp
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -64,39 +63,27 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
|
||||
}
|
||||
|
||||
// only "p" tag has a goodness of 2, so
|
||||
if goodness == 2 {
|
||||
if goodness == 2 && filter.Kinds != nil {
|
||||
// this means we got a "p" tag, so we will use the ptag-kind index
|
||||
i := 0
|
||||
if filter.Kinds != nil {
|
||||
queries = make([]query, len(tagValues)*len(filter.Kinds))
|
||||
for _, value := range tagValues {
|
||||
if len(value) != 64 {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
|
||||
for _, kind := range filter.Kinds {
|
||||
k := make([]byte, 8+2)
|
||||
if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
|
||||
queries[i] = query{i: i, dbi: b.indexPTagKind, prefix: k[0 : 8+2], keySize: 8 + 2 + 4}
|
||||
i++
|
||||
}
|
||||
queries = make([]query, len(tagValues)*len(filter.Kinds))
|
||||
for _, value := range tagValues {
|
||||
if len(value) != 64 {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
} else {
|
||||
// even if there are no kinds, in that case we will just return any kind and not care
|
||||
queries = make([]query, len(tagValues))
|
||||
for i, value := range tagValues {
|
||||
if len(value) != 64 {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
|
||||
k := make([]byte, 8)
|
||||
for _, kind := range filter.Kinds {
|
||||
k := make([]byte, 8+2)
|
||||
if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
queries[i] = query{i: i, dbi: b.indexPTagKind, prefix: k[0:8], keySize: 8 + 2 + 4}
|
||||
binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: b.indexPTagKind,
|
||||
prefix: k[0 : 8+2],
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -107,7 +94,11 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
|
||||
dbi, k, offset := b.getTagIndexPrefix(tagKey, value)
|
||||
// remove the last part to get just the prefix we want here
|
||||
prefix := k[0:offset]
|
||||
queries[i] = query{i: i, dbi: dbi, prefix: prefix, keySize: len(prefix) + 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: dbi,
|
||||
prefix: prefix,
|
||||
}
|
||||
}
|
||||
|
||||
// add an extra kind filter if available (only do this on plain tag index, not on ptag-kind index)
|
||||
@@ -142,7 +133,11 @@ pubkeyMatching:
|
||||
// will use pubkey index
|
||||
queries = make([]query, len(filter.Authors))
|
||||
for i, pk := range filter.Authors {
|
||||
queries[i] = query{i: i, dbi: b.indexPubkey, prefix: pk[0:8], keySize: 8 + 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: b.indexPubkey,
|
||||
prefix: pk[0:8],
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// will use pubkeyKind index
|
||||
@@ -153,7 +148,11 @@ pubkeyMatching:
|
||||
prefix := make([]byte, 8+2)
|
||||
copy(prefix[0:8], pk[0:8])
|
||||
binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind))
|
||||
queries[i] = query{i: i, dbi: b.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: b.indexPubkeyKind,
|
||||
prefix: prefix[0 : 8+2],
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
@@ -170,7 +169,11 @@ pubkeyMatching:
|
||||
for i, kind := range filter.Kinds {
|
||||
prefix := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(prefix[0:2], uint16(kind))
|
||||
queries[i] = query{i: i, dbi: b.indexKind, prefix: prefix[0:2], keySize: 2 + 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: b.indexKind,
|
||||
prefix: prefix[0:2],
|
||||
}
|
||||
}
|
||||
|
||||
// potentially with an extra useless tag filtering
|
||||
@@ -181,6 +184,10 @@ pubkeyMatching:
|
||||
// if we got here our query will have nothing to filter with
|
||||
queries = make([]query, 1)
|
||||
prefix := make([]byte, 0)
|
||||
queries[0] = query{i: 0, dbi: b.indexCreatedAt, prefix: prefix, keySize: 0 + 4}
|
||||
queries[0] = query{
|
||||
i: 0,
|
||||
dbi: b.indexCreatedAt,
|
||||
prefix: prefix,
|
||||
}
|
||||
return queries, nil, nil, "", nil, since, nil
|
||||
}
|
||||
|
||||
@@ -33,18 +33,15 @@ func (il *IndexingLayer) CountEvents(filter nostr.Filter) (uint32, error) {
|
||||
// we already have a k and a v and an err from the cursor setup, so check and use these
|
||||
if it.exhausted ||
|
||||
it.err != nil ||
|
||||
len(it.key) != q.keySize ||
|
||||
len(it.key) != len(q.prefix)+4 ||
|
||||
!bytes.HasPrefix(it.key, q.prefix) {
|
||||
// either iteration has errored or we reached the end of this prefix
|
||||
break // stop this cursor and move to the next one
|
||||
}
|
||||
|
||||
// "id" indexes don't contain a timestamp
|
||||
if q.timestampSize == 4 {
|
||||
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
|
||||
if createdAt < since {
|
||||
break
|
||||
}
|
||||
createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:])
|
||||
if createdAt < since {
|
||||
break
|
||||
}
|
||||
|
||||
if extraAuthors == nil && extraKinds == nil && extraTagValues == nil {
|
||||
|
||||
@@ -41,7 +41,7 @@ func (it *iterator) pull(n int, since uint32) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(it.key) != it.query.keySize || !bytes.HasPrefix(it.key, it.query.prefix) {
|
||||
if len(it.key) != len(it.query.prefix)+4 || !bytes.HasPrefix(it.key, it.query.prefix) {
|
||||
// we reached the end of this prefix
|
||||
it.exhausted = true
|
||||
return
|
||||
@@ -226,7 +226,7 @@ func (il *IndexingLayer) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
|
||||
return
|
||||
}
|
||||
|
||||
// now the p-tag+kind+date
|
||||
// now the p-tag+kind+date
|
||||
if dbi == il.indexTag32 && tag[0] == "p" {
|
||||
k := make([]byte, 8+2+4)
|
||||
xhex.Decode(k[0:8], []byte(tag[1][0:8*2]))
|
||||
|
||||
@@ -14,8 +14,6 @@ type query struct {
|
||||
i int
|
||||
dbi lmdb.DBI
|
||||
prefix []byte
|
||||
keySize int
|
||||
timestampSize int
|
||||
startingPoint []byte
|
||||
}
|
||||
|
||||
@@ -41,10 +39,10 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
|
||||
}
|
||||
}
|
||||
for i, q := range queries {
|
||||
sp := make([]byte, len(q.prefix))
|
||||
sp = sp[0:len(q.prefix)]
|
||||
copy(sp, q.prefix)
|
||||
queries[i].startingPoint = binary.BigEndian.AppendUint32(sp, uint32(until))
|
||||
sp := make([]byte, len(q.prefix)+4)
|
||||
copy(sp[0:len(q.prefix)], q.prefix)
|
||||
binary.BigEndian.PutUint32(sp[len(q.prefix):], uint32(until))
|
||||
queries[i].startingPoint = sp
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -65,39 +63,27 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
|
||||
}
|
||||
|
||||
// only "p" tag has a goodness of 2, so
|
||||
if goodness == 2 {
|
||||
if goodness == 2 && filter.Kinds != nil {
|
||||
// this means we got a "p" tag, so we will use the ptag-kind index
|
||||
i := 0
|
||||
if filter.Kinds != nil {
|
||||
queries = make([]query, len(tagValues)*len(filter.Kinds))
|
||||
for _, value := range tagValues {
|
||||
if len(value) != 64 {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
|
||||
for _, kind := range filter.Kinds {
|
||||
k := make([]byte, 8+2)
|
||||
if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
|
||||
queries[i] = query{i: i, dbi: il.indexPTagKind, prefix: k[0 : 8+2], keySize: 8 + 2 + 4, timestampSize: 4}
|
||||
i++
|
||||
}
|
||||
queries = make([]query, len(tagValues)*len(filter.Kinds))
|
||||
for _, value := range tagValues {
|
||||
if len(value) != 64 {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
} else {
|
||||
// even if there are no kinds, in that case we will just return any kind and not care
|
||||
queries = make([]query, len(tagValues))
|
||||
for i, value := range tagValues {
|
||||
if len(value) != 64 {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
|
||||
k := make([]byte, 8)
|
||||
for _, kind := range filter.Kinds {
|
||||
k := make([]byte, 8+2)
|
||||
if err := xhex.Decode(k[0:8], []byte(value[0:8*2])); err != nil {
|
||||
return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value)
|
||||
}
|
||||
queries[i] = query{i: i, dbi: il.indexPTagKind, prefix: k[0:8], keySize: 8 + 2 + 4, timestampSize: 4}
|
||||
binary.BigEndian.PutUint16(k[8:8+2], uint16(kind))
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: il.indexPTagKind,
|
||||
prefix: k[0 : 8+2],
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -108,7 +94,11 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
|
||||
dbi, k, offset := il.getTagIndexPrefix(tagKey, value)
|
||||
// remove the last part to get just the prefix we want here
|
||||
prefix := k[0:offset]
|
||||
queries[i] = query{i: i, dbi: dbi, prefix: prefix, keySize: len(prefix) + 4, timestampSize: 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: dbi,
|
||||
prefix: prefix,
|
||||
}
|
||||
}
|
||||
|
||||
// add an extra kind filter if available (only do this on plain tag index, not on ptag-kind index)
|
||||
@@ -143,9 +133,11 @@ pubkeyMatching:
|
||||
// will use pubkey index
|
||||
queries = make([]query, len(filter.Authors))
|
||||
for i, pk := range filter.Authors {
|
||||
prefix := make([]byte, 8)
|
||||
copy(prefix[0:8], pk[0:8])
|
||||
queries[i] = query{i: i, dbi: il.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: il.indexPubkey,
|
||||
prefix: pk[0:8],
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// will use pubkeyKind index
|
||||
@@ -156,7 +148,11 @@ pubkeyMatching:
|
||||
prefix := make([]byte, 8+2)
|
||||
copy(prefix[0:8], pk[0:8])
|
||||
binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind))
|
||||
queries[i] = query{i: i, dbi: il.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: il.indexPubkeyKind,
|
||||
prefix: prefix[0 : 8+2],
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
@@ -173,7 +169,11 @@ pubkeyMatching:
|
||||
for i, kind := range filter.Kinds {
|
||||
prefix := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(prefix[0:2], uint16(kind))
|
||||
queries[i] = query{i: i, dbi: il.indexKind, prefix: prefix[0:2], keySize: 2 + 4, timestampSize: 4}
|
||||
queries[i] = query{
|
||||
i: i,
|
||||
dbi: il.indexKind,
|
||||
prefix: prefix[0:2],
|
||||
}
|
||||
}
|
||||
|
||||
// potentially with an extra useless tag filtering
|
||||
@@ -184,6 +184,10 @@ pubkeyMatching:
|
||||
// if we got here our query will have nothing to filter with
|
||||
queries = make([]query, 1)
|
||||
prefix := make([]byte, 0)
|
||||
queries[0] = query{i: 0, dbi: il.indexCreatedAt, prefix: prefix, keySize: 0 + 4, timestampSize: 4}
|
||||
queries[0] = query{
|
||||
i: 0,
|
||||
dbi: il.indexCreatedAt,
|
||||
prefix: prefix,
|
||||
}
|
||||
return queries, nil, nil, "", nil, since, nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user