Zettelstore

Check-in [2c49d84c3c]

Overview
Comment: Integrate removal of space nodes and refactoring for new set implementation
SHA3-256: 2c49d84c3cb1f6606a030b09cbecc4181e9e2d7470641313a8cd78e161a8d40d
User & Date: stern 2024-06-25 14:55:37
Context
2024-06-25
16:54  Update dependency goldmark ... (Leaf check-in: 44970588f6 user: stern tags: trunk)
16:48  Merge from trunk ... (check-in: 1e5b2571a6 user: stern tags: b36)
14:55  Integrate removal of space nodes and refactoring for new set implementation ... (check-in: 2c49d84c3c user: stern tags: trunk)
2024-06-24
16:46  Merge from id-Set-based refactorings ... (Closed-Leaf check-in: f79ae4605a user: stern tags: nospace)
2024-06-23
15:00  Update goldmark dependency ... (check-in: 7d1c721311 user: stern tags: trunk)
Changes

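Taken together, the hunks below carry out the two refactorings named in the check-in comment: the map-based id.Set becomes a pointer type *id.Set with an explicit method set, and ast.SpaceNode is dropped in favour of plain text nodes. The Go sketch below illustrates only the set change; the method names (NewSet, Add, Contains, IsEmpty, Length, ForEach) are taken from the hunks themselves, while the package and function names around them are invented and the code compiles only inside the zettelstore module.

package example // illustrative scaffolding, not part of the check-in

import "zettelstore.de/z/zettel/id"

// collect shows the construction style after the change.
func collect(zids []id.Zid) *id.Set {
	s := id.NewSet() // was: s := id.Set{}
	for _, zid := range zids {
		s.Add(zid) // Add also returns the set, so nil-safe reassignment (s = s.Add(zid)) is possible
	}
	return s
}

// report shows the query style: IsEmpty, ForEach, Length and Contains replace
// len(s), `for zid := range s`, and s[zid] on the old map type.
func report(s *id.Set, zid id.Zid) (int, bool) {
	if s.IsEmpty() {
		return 0, false
	}
	s.ForEach(func(z id.Zid) { _ = z }) // visits every member
	return s.Length(), s.Contains(zid)
}
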
Changes to ast/inline.go.

Old version (lines 10-84):
// SPDX-License-Identifier: EUPL-1.2
// SPDX-FileCopyrightText: 2020-present Detlef Stern
//-----------------------------------------------------------------------------

package ast

import (
	"unicode/utf8"

	"t73f.de/r/zsc/attrs"
)

// Definitions of inline nodes.

// InlineSlice is a list of BlockNodes.
type InlineSlice []InlineNode

func (*InlineSlice) inlineNode() { /* Just a marker */ }

// CreateInlineSliceFromWords makes a new inline list from words,
// that will be space-separated.
func CreateInlineSliceFromWords(words ...string) InlineSlice {
	inl := make(InlineSlice, 0, 2*len(words)-1)
	for i, word := range words {
		if i > 0 {
			inl = append(inl, &SpaceNode{Lexeme: " "})
		}
		inl = append(inl, &TextNode{Text: word})
	}
	return inl
}

// WalkChildren walks down to the list.
func (is *InlineSlice) WalkChildren(v Visitor) {
	for _, in := range *is {
		Walk(v, in)
	}
}

// --------------------------------------------------------------------------

// TextNode just contains some text.
type TextNode struct {
	Text string // The text itself.
}

func (*TextNode) inlineNode() { /* Just a marker */ }

// WalkChildren does nothing.
func (*TextNode) WalkChildren(Visitor) { /* No children*/ }

// --------------------------------------------------------------------------

// SpaceNode tracks inter-word space characters.
type SpaceNode struct {
	Lexeme string
}

func (*SpaceNode) inlineNode() { /* Just a marker */ }

// WalkChildren does nothing.
func (*SpaceNode) WalkChildren(Visitor) { /* No children*/ }

// Count returns the number of space runes.
func (sn *SpaceNode) Count() int {
	return utf8.RuneCountInString(sn.Lexeme)
}

// --------------------------------------------------------------------------

// BreakNode signals a new line that must / should be interpreted as a new line break.
type BreakNode struct {
	Hard bool // Hard line break?
}

New version (lines 10-52):
// SPDX-License-Identifier: EUPL-1.2
// SPDX-FileCopyrightText: 2020-present Detlef Stern
//-----------------------------------------------------------------------------

package ast

import (


	"t73f.de/r/zsc/attrs"
)

// Definitions of inline nodes.

// InlineSlice is a list of BlockNodes.
type InlineSlice []InlineNode

func (*InlineSlice) inlineNode() { /* Just a marker */ }














// WalkChildren walks down to the list.
func (is *InlineSlice) WalkChildren(v Visitor) {
	for _, in := range *is {
		Walk(v, in)
	}
}

// --------------------------------------------------------------------------

// TextNode just contains some text.
type TextNode struct {
	Text string // The text itself.
}

func (*TextNode) inlineNode() { /* Just a marker */ }

// WalkChildren does nothing.
func (*TextNode) WalkChildren(Visitor) { /* No children*/ }


















// --------------------------------------------------------------------------

// BreakNode signals a new line that must / should be interpreted as a new line break.
type BreakNode struct {
	Hard bool // Hard line break?
}

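With SpaceNode gone, inter-word spacing is no longer modelled as a separate inline node; as the walk_test.go hunk below shows, call sites now put the spaces directly into a single TextNode. A hypothetical before/after (CreateInlineSliceFromWords and SpaceNode exist only on the old side of this diff):

package example // illustrative only

import "zettelstore.de/z/ast"

func headingInlines() ast.InlineSlice {
	// old: ast.CreateInlineSliceFromWords("A", "Simple", "Heading")
	//      built TextNode{"A"}, SpaceNode{" "}, TextNode{"Simple"}, SpaceNode{" "}, TextNode{"Heading"}
	// new: one TextNode that carries the spaces itself
	return ast.InlineSlice{&ast.TextNode{Text: "A Simple Heading"}}
}
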
Changes to ast/walk_test.go.

Old version (lines 19-68):
	"t73f.de/r/zsc/attrs"
	"zettelstore.de/z/ast"
)

func BenchmarkWalk(b *testing.B) {
	root := ast.BlockSlice{
		&ast.HeadingNode{
			Inlines: ast.CreateInlineSliceFromWords("A", "Simple", "Heading"),
		},
		&ast.ParaNode{
			Inlines: ast.CreateInlineSliceFromWords("This", "is", "the", "introduction."),
		},
		&ast.NestedListNode{
			Kind: ast.NestedListUnordered,
			Items: []ast.ItemSlice{
				[]ast.ItemNode{
					&ast.ParaNode{
						Inlines: ast.CreateInlineSliceFromWords("Item", "1"),
					},
				},
				[]ast.ItemNode{
					&ast.ParaNode{
						Inlines: ast.CreateInlineSliceFromWords("Item", "2"),
					},
				},
			},
		},
		&ast.ParaNode{
			Inlines: ast.CreateInlineSliceFromWords("This", "is", "some", "intermediate", "text."),
		},
		ast.CreateParaNode(
			&ast.FormatNode{
				Kind: ast.FormatEmph,
				Attrs: attrs.Attributes(map[string]string{
					"":      "class",
					"color": "green",
				}),
				Inlines: ast.CreateInlineSliceFromWords("This", "is", "some", "emphasized", "text."),
			},
			&ast.SpaceNode{Lexeme: " "},
			&ast.LinkNode{
				Ref:     &ast.Reference{Value: "http://zettelstore.de"},
				Inlines: ast.CreateInlineSliceFromWords("URL", "text."),
			},
		),
	}
	v := benchVisitor{}
	b.ResetTimer()
	for range b.N {
		ast.Walk(&v, &root)

New version (lines 19-68):
	"t73f.de/r/zsc/attrs"
	"zettelstore.de/z/ast"
)

func BenchmarkWalk(b *testing.B) {
	root := ast.BlockSlice{
		&ast.HeadingNode{
			Inlines: ast.InlineSlice{&ast.TextNode{Text: "A Simple Heading"}},
		},
		&ast.ParaNode{
			Inlines: ast.InlineSlice{&ast.TextNode{Text: "This is the introduction."}},
		},
		&ast.NestedListNode{
			Kind: ast.NestedListUnordered,
			Items: []ast.ItemSlice{
				[]ast.ItemNode{
					&ast.ParaNode{
						Inlines: ast.InlineSlice{&ast.TextNode{Text: "Item 1"}},
					},
				},
				[]ast.ItemNode{
					&ast.ParaNode{
						Inlines: ast.InlineSlice{&ast.TextNode{Text: "Item 2"}},
					},
				},
			},
		},
		&ast.ParaNode{
			Inlines: ast.InlineSlice{&ast.TextNode{Text: "This is some intermediate text."}},
		},
		ast.CreateParaNode(
			&ast.FormatNode{
				Kind: ast.FormatEmph,
				Attrs: attrs.Attributes(map[string]string{
					"":      "class",
					"color": "green",
				}),
				Inlines: ast.InlineSlice{&ast.TextNode{Text: "This is some emphasized text."}},
			},
			&ast.TextNode{Text: " "},
			&ast.LinkNode{
				Ref:     &ast.Reference{Value: "http://zettelstore.de"},
				Inlines: ast.InlineSlice{&ast.TextNode{Text: "URL text."}},
			},
		),
	}
	v := benchVisitor{}
	b.ResetTimer()
	for range b.N {
		ast.Walk(&v, &root)

Changes to auth/policy/box.go.

Old version (lines 74-88):
	return zettel.Zettel{}, box.NewErrNotAllowed("GetZettel", user, zid)
}

func (pp *polBox) GetAllZettel(ctx context.Context, zid id.Zid) ([]zettel.Zettel, error) {
	return pp.box.GetAllZettel(ctx, zid)
}

func (pp *polBox) FetchZids(ctx context.Context) (id.Set, error) {
	return nil, box.NewErrNotAllowed("fetch-zids", server.GetUser(ctx), id.Invalid)
}

func (pp *polBox) GetMeta(ctx context.Context, zid id.Zid) (*meta.Meta, error) {
	m, err := pp.box.GetMeta(ctx, zid)
	if err != nil {
		return nil, err

New version (lines 74-88):
	return zettel.Zettel{}, box.NewErrNotAllowed("GetZettel", user, zid)
}

func (pp *polBox) GetAllZettel(ctx context.Context, zid id.Zid) ([]zettel.Zettel, error) {
	return pp.box.GetAllZettel(ctx, zid)
}

func (pp *polBox) FetchZids(ctx context.Context) (*id.Set, error) {
	return nil, box.NewErrNotAllowed("fetch-zids", server.GetUser(ctx), id.Invalid)
}

func (pp *polBox) GetMeta(ctx context.Context, zid id.Zid) (*meta.Meta, error) {
	m, err := pp.box.GetMeta(ctx, zid)
	if err != nil {
		return nil, err

Changes to box/box.go.

Old version (lines 134-148):

// Box is to be used outside the box package and its descendants.
type Box interface {
	BaseBox
	WriteBox

	// FetchZids returns the set of all zettel identifer managed by the box.
	FetchZids(ctx context.Context) (id.Set, error)

	// GetMeta returns the metadata of the zettel with the given identifier.
	GetMeta(context.Context, id.Zid) (*meta.Meta, error)

	// SelectMeta returns a list of metadata that comply to the given selection criteria.
	// If `metaSeq` is `nil`, the box assumes metadata of all available zettel.
	SelectMeta(ctx context.Context, metaSeq []*meta.Meta, q *query.Query) ([]*meta.Meta, error)

New version (lines 134-148):

// Box is to be used outside the box package and its descendants.
type Box interface {
	BaseBox
	WriteBox

	// FetchZids returns the set of all zettel identifer managed by the box.
	FetchZids(ctx context.Context) (*id.Set, error)

	// GetMeta returns the metadata of the zettel with the given identifier.
	GetMeta(context.Context, id.Zid) (*meta.Meta, error)

	// SelectMeta returns a list of metadata that comply to the given selection criteria.
	// If `metaSeq` is `nil`, the box assumes metadata of all available zettel.
	SelectMeta(ctx context.Context, metaSeq []*meta.Meta, q *query.Query) ([]*meta.Meta, error)

Changes to box/manager/anteroom.go.

Old version (lines 25-39):
	arNothing arAction = iota
	arReload
	arZettel
)

type anteroom struct {
	next    *anteroom
	waiting id.Set
	curLoad int
	reload  bool
}

type anteroomQueue struct {
	mx      sync.Mutex
	first   *anteroom

New version (lines 25-39):
	arNothing arAction = iota
	arReload
	arZettel
)

type anteroom struct {
	next    *anteroom
	waiting *id.Set
	curLoad int
	reload  bool
}

type anteroomQueue struct {
	mx      sync.Mutex
	first   *anteroom

Old version (lines 54-68):
		ar.last = ar.first
		return
	}
	for room := ar.first; room != nil; room = room.next {
		if room.reload {
			continue // Do not put zettel in reload room
		}
		if _, ok := room.waiting[zid]; ok {
			// Zettel is already waiting. Nothing to do.
			return
		}
	}
	if room := ar.last; !room.reload && (ar.maxLoad == 0 || room.curLoad < ar.maxLoad) {
		room.waiting.Add(zid)
		room.curLoad++

New version (lines 54-68):
		ar.last = ar.first
		return
	}
	for room := ar.first; room != nil; room = room.next {
		if room.reload {
			continue // Do not put zettel in reload room
		}
		if room.waiting.Contains(zid) {
			// Zettel is already waiting. Nothing to do.
			return
		}
	}
	if room := ar.last; !room.reload && (ar.maxLoad == 0 || room.curLoad < ar.maxLoad) {
		room.waiting.Add(zid)
		room.curLoad++

Old version (lines 84-104):
func (ar *anteroomQueue) Reset() {
	ar.mx.Lock()
	defer ar.mx.Unlock()
	ar.first = &anteroom{next: nil, waiting: nil, curLoad: 0, reload: true}
	ar.last = ar.first
}

func (ar *anteroomQueue) Reload(allZids id.Set) {
	ar.mx.Lock()
	defer ar.mx.Unlock()
	ar.deleteReloadedRooms()

	if ns := len(allZids); ns > 0 {
		ar.first = &anteroom{next: ar.first, waiting: allZids, curLoad: ns, reload: true}
		if ar.first.next == nil {
			ar.last = ar.first
		}
	} else {
		ar.first = nil
		ar.last = nil
	}

New version (lines 84-104):
func (ar *anteroomQueue) Reset() {
	ar.mx.Lock()
	defer ar.mx.Unlock()
	ar.first = &anteroom{next: nil, waiting: nil, curLoad: 0, reload: true}
	ar.last = ar.first
}

func (ar *anteroomQueue) Reload(allZids *id.Set) {
	ar.mx.Lock()
	defer ar.mx.Unlock()
	ar.deleteReloadedRooms()

	if !allZids.IsEmpty() {
		ar.first = &anteroom{next: ar.first, waiting: allZids, curLoad: allZids.Length(), reload: true}
		if ar.first.next == nil {
			ar.last = ar.first
		}
	} else {
		ar.first = nil
		ar.last = nil
	}

Old version (lines 120-136):
	defer ar.mx.Unlock()
	first := ar.first
	if first != nil {
		if first.waiting == nil && first.reload {
			ar.removeFirst()
			return arReload, id.Invalid, false
		}
		for zid := range first.waiting {
			delete(first.waiting, zid)
			if len(first.waiting) == 0 {
				ar.removeFirst()
			}
			return arZettel, zid, first.reload
		}
		ar.removeFirst()
	}
	return arNothing, id.Invalid, false

New version (lines 120-135):
	defer ar.mx.Unlock()
	first := ar.first
	if first != nil {
		if first.waiting == nil && first.reload {
			ar.removeFirst()
			return arReload, id.Invalid, false
		}
		if zid, found := first.waiting.Pop(); found {
			if first.waiting.IsEmpty() {

				ar.removeFirst()
			}
			return arZettel, zid, first.reload
		}
		ar.removeFirst()
	}
	return arNothing, id.Invalid, false

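The anteroom hunks above also replace the old drain idiom (range over the map, then delete the key) with Pop and IsEmpty. A small sketch of that pattern, under the assumption, suggested by the hunk, that Pop removes and returns an arbitrary member together with a found flag; the function drain itself is invented for illustration.

package example // illustrative only

import "zettelstore.de/z/zettel/id"

// drain processes and removes every member of waiting, mirroring the
// anteroomQueue change above.
func drain(waiting *id.Set, handle func(id.Zid)) {
	for {
		zid, found := waiting.Pop() // was: range over the map + delete(waiting, zid)
		if !found {
			return // nothing left; waiting.IsEmpty() would now report true
		}
		handle(zid)
	}
}
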
Changes to box/manager/box.go.

Old version (lines 109-128):
			result = append(result, z)
		}
	}
	return result, nil
}

// FetchZids returns the set of all zettel identifer managed by the box.
func (mgr *Manager) FetchZids(ctx context.Context) (id.Set, error) {
	mgr.mgrLog.Debug().Msg("FetchZids")
	if mgr.State() != box.StartStateStarted {
		return nil, box.ErrStopped
	}
	result := id.Set{}
	mgr.mgrMx.RLock()
	defer mgr.mgrMx.RUnlock()
	for _, p := range mgr.boxes {
		err := p.ApplyZid(ctx, func(zid id.Zid) { result.Add(zid) }, func(id.Zid) bool { return true })
		if err != nil {
			return nil, err
		}

New version (lines 109-128):
			result = append(result, z)
		}
	}
	return result, nil
}

// FetchZids returns the set of all zettel identifer managed by the box.
func (mgr *Manager) FetchZids(ctx context.Context) (*id.Set, error) {
	mgr.mgrLog.Debug().Msg("FetchZids")
	if mgr.State() != box.StartStateStarted {
		return nil, box.ErrStopped
	}
	result := id.NewSet()
	mgr.mgrMx.RLock()
	defer mgr.mgrMx.RUnlock()
	for _, p := range mgr.boxes {
		err := p.ApplyZid(ctx, func(zid id.Zid) { result.Add(zid) }, func(id.Zid) bool { return true })
		if err != nil {
			return nil, err
		}

Old version (lines 174-191):
	compSearch := q.RetrieveAndCompile(ctx, mgr, metaSeq)
	if result := compSearch.Result(); result != nil {
		mgr.mgrLog.Trace().Int("count", int64(len(result))).Msg("found without ApplyMeta")
		return result, nil
	}
	selected := map[id.Zid]*meta.Meta{}
	for _, term := range compSearch.Terms {
		rejected := id.Set{}
		handleMeta := func(m *meta.Meta) {
			zid := m.Zid
			if rejected.ContainsOrNil(zid) {
				mgr.mgrLog.Trace().Zid(zid).Msg("SelectMeta/alreadyRejected")
				return
			}
			if _, ok := selected[zid]; ok {
				mgr.mgrLog.Trace().Zid(zid).Msg("SelectMeta/alreadySelected")
				return
			}

New version (lines 174-191):
	compSearch := q.RetrieveAndCompile(ctx, mgr, metaSeq)
	if result := compSearch.Result(); result != nil {
		mgr.mgrLog.Trace().Int("count", int64(len(result))).Msg("found without ApplyMeta")
		return result, nil
	}
	selected := map[id.Zid]*meta.Meta{}
	for _, term := range compSearch.Terms {
		rejected := id.NewSet()
		handleMeta := func(m *meta.Meta) {
			zid := m.Zid
			if rejected.Contains(zid) {
				mgr.mgrLog.Trace().Zid(zid).Msg("SelectMeta/alreadyRejected")
				return
			}
			if _, ok := selected[zid]; ok {
				mgr.mgrLog.Trace().Zid(zid).Msg("SelectMeta/alreadySelected")
				return
			}

Changes to box/manager/collect.go.

Old version (lines 19-33):
	"zettelstore.de/z/ast"
	"zettelstore.de/z/box/manager/store"
	"zettelstore.de/z/strfun"
	"zettelstore.de/z/zettel/id"
)

type collectData struct {
	refs  id.Set
	words store.WordSet
	urls  store.WordSet
}

func (data *collectData) initialize() {
	data.refs = id.NewSet()
	data.words = store.NewWordSet()

New version (lines 19-33):
	"zettelstore.de/z/ast"
	"zettelstore.de/z/box/manager/store"
	"zettelstore.de/z/strfun"
	"zettelstore.de/z/zettel/id"
)

type collectData struct {
	refs  *id.Set
	words store.WordSet
	urls  store.WordSet
}

func (data *collectData) initialize() {
	data.refs = id.NewSet()
	data.words = store.NewWordSet()

Changes to box/manager/indexer.go.

Old version (lines 27-76):
	"zettelstore.de/z/zettel"
	"zettelstore.de/z/zettel/id"
	"zettelstore.de/z/zettel/meta"
)

// SearchEqual returns all zettel that contains the given exact word.
// The word must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchEqual(word string) id.Set {
	found := mgr.idxStore.SearchEqual(word)
	mgr.idxLog.Debug().Str("word", word).Int("found", int64(len(found))).Msg("SearchEqual")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// SearchPrefix returns all zettel that have a word with the given prefix.
// The prefix must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchPrefix(prefix string) id.Set {
	found := mgr.idxStore.SearchPrefix(prefix)
	mgr.idxLog.Debug().Str("prefix", prefix).Int("found", int64(len(found))).Msg("SearchPrefix")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// SearchSuffix returns all zettel that have a word with the given suffix.
// The suffix must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchSuffix(suffix string) id.Set {
	found := mgr.idxStore.SearchSuffix(suffix)
	mgr.idxLog.Debug().Str("suffix", suffix).Int("found", int64(len(found))).Msg("SearchSuffix")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// SearchContains returns all zettel that contains the given string.
// The string must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchContains(s string) id.Set {
	found := mgr.idxStore.SearchContains(s)
	mgr.idxLog.Debug().Str("s", s).Int("found", int64(len(found))).Msg("SearchContains")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// idxIndexer runs in the background and updates the index data structures.

New version (lines 27-76):
	"zettelstore.de/z/zettel"
	"zettelstore.de/z/zettel/id"
	"zettelstore.de/z/zettel/meta"
)

// SearchEqual returns all zettel that contains the given exact word.
// The word must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchEqual(word string) *id.Set {
	found := mgr.idxStore.SearchEqual(word)
	mgr.idxLog.Debug().Str("word", word).Int("found", int64(found.Length())).Msg("SearchEqual")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// SearchPrefix returns all zettel that have a word with the given prefix.
// The prefix must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchPrefix(prefix string) *id.Set {
	found := mgr.idxStore.SearchPrefix(prefix)
	mgr.idxLog.Debug().Str("prefix", prefix).Int("found", int64(found.Length())).Msg("SearchPrefix")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// SearchSuffix returns all zettel that have a word with the given suffix.
// The suffix must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchSuffix(suffix string) *id.Set {
	found := mgr.idxStore.SearchSuffix(suffix)
	mgr.idxLog.Debug().Str("suffix", suffix).Int("found", int64(found.Length())).Msg("SearchSuffix")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// SearchContains returns all zettel that contains the given string.
// The string must be normalized through Unicode NKFD, trimmed and not empty.
func (mgr *Manager) SearchContains(s string) *id.Set {
	found := mgr.idxStore.SearchContains(s)
	mgr.idxLog.Debug().Str("s", s).Int("found", int64(found.Length())).Msg("SearchContains")
	if msg := mgr.idxLog.Trace(); msg.Enabled() {
		msg.Str("ids", fmt.Sprint(found)).Msg("IDs")
	}
	return found
}

// idxIndexer runs in the background and updates the index data structures.

Old version (lines 139-152):
		if !ok {
			return false
		}
	case _, ok := <-timer.C:
		if !ok {
			return false
		}

		timer.Reset(timerDuration)
	case <-mgr.done:
		if !timer.Stop() {
			<-timer.C
		}
		return false
	}

New version (lines 139-153):
		if !ok {
			return false
		}
	case _, ok := <-timer.C:
		if !ok {
			return false
		}
		mgr.idxStore.Optimize() // TODO: make it less often, for example once per 10 minutes
		timer.Reset(timerDuration)
	case <-mgr.done:
		if !timer.Stop() {
			<-timer.C
		}
		return false
	}

Old version (lines 205-225):
		}
	} else {
		stWords.Add(value)
	}
}

func (mgr *Manager) idxProcessData(ctx context.Context, zi *store.ZettelIndex, cData *collectData) {
	for ref := range cData.refs {
		if mgr.HasZettel(ctx, ref) {
			zi.AddBackRef(ref)
		} else {
			zi.AddDeadRef(ref)
		}
	}
	zi.SetWords(cData.words)
	zi.SetUrls(cData.urls)
}

func (mgr *Manager) idxUpdateValue(ctx context.Context, inverseKey, value string, zi *store.ZettelIndex) {
	zid, err := id.Parse(value)
	if err != nil {

New version (lines 206-226):
		}
	} else {
		stWords.Add(value)
	}
}

func (mgr *Manager) idxProcessData(ctx context.Context, zi *store.ZettelIndex, cData *collectData) {
	cData.refs.ForEach(func(ref id.Zid) {
		if mgr.HasZettel(ctx, ref) {
			zi.AddBackRef(ref)
		} else {
			zi.AddDeadRef(ref)
		}
	})
	zi.SetWords(cData.words)
	zi.SetUrls(cData.urls)
}

func (mgr *Manager) idxUpdateValue(ctx context.Context, inverseKey, value string, zi *store.ZettelIndex) {
	zid, err := id.Parse(value)
	if err != nil {

Old version (lines 242-253):
}

func (mgr *Manager) idxDeleteZettel(ctx context.Context, zid id.Zid) {
	toCheck := mgr.idxStore.DeleteZettel(ctx, zid)
	mgr.idxCheckZettel(toCheck)
}

func (mgr *Manager) idxCheckZettel(s id.Set) {
	for zid := range s {
		mgr.idxAr.EnqueueZettel(zid)
	}
}

New version (lines 243-254):
}

func (mgr *Manager) idxDeleteZettel(ctx context.Context, zid id.Zid) {
	toCheck := mgr.idxStore.DeleteZettel(ctx, zid)
	mgr.idxCheckZettel(toCheck)
}

func (mgr *Manager) idxCheckZettel(s *id.Set) {
	s.ForEach(func(zid id.Zid) {
		mgr.idxAr.EnqueueZettel(zid)
	})
}

Changes to box/manager/mapstore/mapstore.go.

Old version (lines 28-162):
	"zettelstore.de/z/box/manager/store"
	"zettelstore.de/z/zettel/id"
	"zettelstore.de/z/zettel/meta"
)

type zettelData struct {
	meta      *meta.Meta // a local copy of the metadata, without computed keys
	dead      id.Slice   // list of dead references in this zettel
	forward   id.Slice   // list of forward references in this zettel
	backward  id.Slice   // list of zettel that reference with zettel
	otherRefs map[string]bidiRefs
	words     []string // list of words of this zettel
	urls      []string // list of urls of this zettel
}

type bidiRefs struct {
	forward  id.Slice
	backward id.Slice
}





type stringRefs map[string]id.Slice





type memStore struct {
	mx     sync.RWMutex
	intern map[string]string // map to intern strings
	idx    map[id.Zid]*zettelData
	dead   map[id.Zid]id.Slice // map dead refs where they occur
	words  stringRefs
	urls   stringRefs

	// Stats
	mxStats sync.Mutex
	updates uint64
}


// New returns a new memory-based index store.
func New() store.Store {
	return &memStore{
		intern: make(map[string]string, 1024),
		idx:    make(map[id.Zid]*zettelData),
		dead:   make(map[id.Zid]id.Slice),
		words:  make(stringRefs),
		urls:   make(stringRefs),
	}
}

func (ms *memStore) GetMeta(_ context.Context, zid id.Zid) (*meta.Meta, error) {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	if zi, found := ms.idx[zid]; found && zi.meta != nil {
		// zi.meta is nil, if zettel was referenced, but is not indexed yet.
		return zi.meta.Clone(), nil
	}
	return nil, box.ErrZettelNotFound{Zid: zid}
}

func (ms *memStore) Enrich(_ context.Context, m *meta.Meta) {
	if ms.doEnrich(m) {
		ms.mxStats.Lock()
		ms.updates++
		ms.mxStats.Unlock()
	}
}

func (ms *memStore) doEnrich(m *meta.Meta) bool {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	zi, ok := ms.idx[m.Zid]
	if !ok {
		return false
	}
	var updated bool
	if len(zi.dead) > 0 {
		m.Set(api.KeyDead, zi.dead.String())
		updated = true
	}
	back := removeOtherMetaRefs(m, zi.backward.Clone())
	if len(zi.backward) > 0 {
		m.Set(api.KeyBackward, zi.backward.String())
		updated = true
	}
	if len(zi.forward) > 0 {
		m.Set(api.KeyForward, zi.forward.String())
		back = remRefs(back, zi.forward)
		updated = true
	}
	for k, refs := range zi.otherRefs {
		if len(refs.backward) > 0 {
			m.Set(k, refs.backward.String())
			back = remRefs(back, refs.backward)
			updated = true
		}
	}
	if len(back) > 0 {
		m.Set(api.KeyBack, back.String())
		updated = true
	}
	return updated
}

// SearchEqual returns all zettel that contains the given exact word.
// The word must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *memStore) SearchEqual(word string) id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := id.NewSet()
	if refs, ok := ms.words[word]; ok {
		result.CopySlice(refs)
	}
	if refs, ok := ms.urls[word]; ok {
		result.CopySlice(refs)
	}
	zid, err := id.Parse(word)
	if err != nil {
		return result
	}
	zi, ok := ms.idx[zid]
	if !ok {
		return result
	}

	addBackwardZids(result, zid, zi)
	return result
}

// SearchPrefix returns all zettel that have a word with the given prefix.
// The prefix must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *memStore) SearchPrefix(prefix string) id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := ms.selectWithPred(prefix, strings.HasPrefix)
	l := len(prefix)
	if l > 14 {
		return result
	}

New version (lines 28-170):
	"zettelstore.de/z/box/manager/store"
	"zettelstore.de/z/zettel/id"
	"zettelstore.de/z/zettel/meta"
)

type zettelData struct {
	meta      *meta.Meta // a local copy of the metadata, without computed keys
	dead      *id.Set    // set of dead references in this zettel
	forward   *id.Set    // set of forward references in this zettel
	backward  *id.Set    // set of zettel that reference with zettel
	otherRefs map[string]bidiRefs
	words     []string // list of words of this zettel
	urls      []string // list of urls of this zettel
}

type bidiRefs struct {
	forward  *id.Set
	backward *id.Set
}

func (zd *zettelData) optimize() {
	zd.dead.Optimize()
	zd.forward.Optimize()
	zd.backward.Optimize()
	for _, bidi := range zd.otherRefs {
		bidi.forward.Optimize()
		bidi.backward.Optimize()
	}
}

type mapStore struct {
	mx     sync.RWMutex
	intern map[string]string // map to intern strings
	idx    map[id.Zid]*zettelData
	dead   map[id.Zid]*id.Set // map dead refs where they occur
	words  stringRefs
	urls   stringRefs

	// Stats
	mxStats sync.Mutex
	updates uint64
}
type stringRefs map[string]*id.Set

// New returns a new memory-based index store.
func New() store.Store {
	return &mapStore{
		intern: make(map[string]string, 1024),
		idx:    make(map[id.Zid]*zettelData),
		dead:   make(map[id.Zid]*id.Set),
		words:  make(stringRefs),
		urls:   make(stringRefs),
	}
}

func (ms *mapStore) GetMeta(_ context.Context, zid id.Zid) (*meta.Meta, error) {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	if zi, found := ms.idx[zid]; found && zi.meta != nil {
		// zi.meta is nil, if zettel was referenced, but is not indexed yet.
		return zi.meta.Clone(), nil
	}
	return nil, box.ErrZettelNotFound{Zid: zid}
}

func (ms *mapStore) Enrich(_ context.Context, m *meta.Meta) {
	if ms.doEnrich(m) {
		ms.mxStats.Lock()
		ms.updates++
		ms.mxStats.Unlock()
	}
}

func (ms *mapStore) doEnrich(m *meta.Meta) bool {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	zi, ok := ms.idx[m.Zid]
	if !ok {
		return false
	}
	var updated bool
	if !zi.dead.IsEmpty() {
		m.Set(api.KeyDead, zi.dead.MetaString())
		updated = true
	}
	back := removeOtherMetaRefs(m, zi.backward.Clone())
	if !zi.backward.IsEmpty() {
		m.Set(api.KeyBackward, zi.backward.MetaString())
		updated = true
	}
	if !zi.forward.IsEmpty() {
		m.Set(api.KeyForward, zi.forward.MetaString())
		back.ISubstract(zi.forward)
		updated = true
	}
	for k, refs := range zi.otherRefs {
		if !refs.backward.IsEmpty() {
			m.Set(k, refs.backward.MetaString())
			back.ISubstract(refs.backward)
			updated = true
		}
	}
	if !back.IsEmpty() {
		m.Set(api.KeyBack, back.MetaString())
		updated = true
	}
	return updated
}

// SearchEqual returns all zettel that contains the given exact word.
// The word must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *mapStore) SearchEqual(word string) *id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := id.NewSet()
	if refs, ok := ms.words[word]; ok {
		result = result.IUnion(refs)
	}
	if refs, ok := ms.urls[word]; ok {
		result = result.IUnion(refs)
	}
	zid, err := id.Parse(word)
	if err != nil {
		return result
	}
	zi, ok := ms.idx[zid]
	if !ok {
		return result
	}

	return addBackwardZids(result, zid, zi)

}

// SearchPrefix returns all zettel that have a word with the given prefix.
// The prefix must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *mapStore) SearchPrefix(prefix string) *id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := ms.selectWithPred(prefix, strings.HasPrefix)
	l := len(prefix)
	if l > 14 {
		return result
	}

Old version (lines 171-707):
		minZid, err = id.Parse(prefix + "00000000000000"[:14-l])
		if err != nil {
			return result
		}
	}
	for zid, zi := range ms.idx {
		if minZid <= zid && zid <= maxZid {
			addBackwardZids(result, zid, zi)
		}
	}
	return result
}

// SearchSuffix returns all zettel that have a word with the given suffix.
// The suffix must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *memStore) SearchSuffix(suffix string) id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := ms.selectWithPred(suffix, strings.HasSuffix)
	l := len(suffix)
	if l > 14 {
		return result
	}
	val, err := id.ParseUint(suffix)
	if err != nil {
		return result
	}
	modulo := uint64(1)
	for range l {
		modulo *= 10
	}
	for zid, zi := range ms.idx {
		if uint64(zid)%modulo == val {
			addBackwardZids(result, zid, zi)
		}
	}
	return result
}

// SearchContains returns all zettel that contains the given string.
// The string must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *memStore) SearchContains(s string) id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := ms.selectWithPred(s, strings.Contains)
	if len(s) > 14 {
		return result
	}
	if _, err := id.ParseUint(s); err != nil {
		return result
	}
	for zid, zi := range ms.idx {
		if strings.Contains(zid.String(), s) {
			addBackwardZids(result, zid, zi)
		}
	}
	return result
}

func (ms *memStore) selectWithPred(s string, pred func(string, string) bool) id.Set {
	// Must only be called if ms.mx is read-locked!
	result := id.NewSet()
	for word, refs := range ms.words {
		if !pred(word, s) {
			continue
		}
		result.CopySlice(refs)
	}
	for u, refs := range ms.urls {
		if !pred(u, s) {
			continue
		}
		result.CopySlice(refs)










	}
	return result
}

func addBackwardZids(result id.Set, zid id.Zid, zi *zettelData) {
	// Must only be called if ms.mx is read-locked!
	result.Add(zid)
	result.CopySlice(zi.backward)
	for _, mref := range zi.otherRefs {
		result.CopySlice(mref.backward)
	}
}

func removeOtherMetaRefs(m *meta.Meta, back id.Slice) id.Slice {
	for _, p := range m.PairsRest() {
		switch meta.Type(p.Key) {
		case meta.TypeID:
			if zid, err := id.Parse(p.Value); err == nil {
				back = remRef(back, zid)
			}
		case meta.TypeIDSet:
			for _, val := range meta.ListFromValue(p.Value) {
				if zid, err := id.Parse(val); err == nil {
					back = remRef(back, zid)
				}
			}
		}
	}
	return back
}

func (ms *memStore) UpdateReferences(_ context.Context, zidx *store.ZettelIndex) id.Set {
	ms.mx.Lock()
	defer ms.mx.Unlock()
	m := ms.makeMeta(zidx)
	zi, ziExist := ms.idx[zidx.Zid]
	if !ziExist || zi == nil {
		zi = &zettelData{}
		ziExist = false
	}

	// Is this zettel an old dead reference mentioned in other zettel?
	var toCheck id.Set
	if refs, ok := ms.dead[zidx.Zid]; ok {
		// These must be checked later again
		toCheck = id.NewSet(refs...)
		delete(ms.dead, zidx.Zid)
	}

	zi.meta = m
	ms.updateDeadReferences(zidx, zi)
	ids := ms.updateForwardBackwardReferences(zidx, zi)
	toCheck = toCheck.Copy(ids)
	ids = ms.updateMetadataReferences(zidx, zi)
	toCheck = toCheck.Copy(ids)
	zi.words = updateStrings(zidx.Zid, ms.words, zi.words, zidx.GetWords())
	zi.urls = updateStrings(zidx.Zid, ms.urls, zi.urls, zidx.GetUrls())

	// Check if zi must be inserted into ms.idx
	if !ziExist {
		ms.idx[zidx.Zid] = zi
	}

	return toCheck
}

var internableKeys = map[string]bool{
	api.KeyRole:      true,
	api.KeySyntax:    true,
	api.KeyFolgeRole: true,
	api.KeyLang:      true,
	api.KeyReadOnly:  true,
}

func isInternableValue(key string) bool {
	if internableKeys[key] {
		return true
	}
	return strings.HasSuffix(key, meta.SuffixKeyRole)
}

func (ms *memStore) internString(s string) string {
	if is, found := ms.intern[s]; found {
		return is
	}
	ms.intern[s] = s
	return s
}

func (ms *memStore) makeMeta(zidx *store.ZettelIndex) *meta.Meta {
	origM := zidx.GetMeta()
	copyM := meta.New(origM.Zid)
	for _, p := range origM.Pairs() {
		key := ms.internString(p.Key)
		if isInternableValue(key) {
			copyM.Set(key, ms.internString(p.Value))
		} else if key == api.KeyBoxNumber || !meta.IsComputed(key) {
			copyM.Set(key, p.Value)
		}
	}
	return copyM
}

func (ms *memStore) updateDeadReferences(zidx *store.ZettelIndex, zi *zettelData) {
	// Must only be called if ms.mx is write-locked!
	drefs := zidx.GetDeadRefs()
	newRefs, remRefs := refsDiff(drefs, zi.dead)
	zi.dead = drefs
	for _, ref := range remRefs {
		ms.dead[ref] = remRef(ms.dead[ref], zidx.Zid)
	}
	for _, ref := range newRefs {
		ms.dead[ref] = addRef(ms.dead[ref], zidx.Zid)
	}
}

func (ms *memStore) updateForwardBackwardReferences(zidx *store.ZettelIndex, zi *zettelData) id.Set {
	// Must only be called if ms.mx is write-locked!
	brefs := zidx.GetBackRefs()

	newRefs, remRefs := refsDiff(brefs, zi.forward)
	zi.forward = brefs

	var toCheck id.Set
	for _, ref := range remRefs {
		bzi := ms.getOrCreateEntry(ref)
		bzi.backward = remRef(bzi.backward, zidx.Zid)
		if bzi.meta == nil {
			toCheck = toCheck.Add(ref)
		}
	}
	for _, ref := range newRefs {
		bzi := ms.getOrCreateEntry(ref)
		bzi.backward = addRef(bzi.backward, zidx.Zid)
		if bzi.meta == nil {
			toCheck = toCheck.Add(ref)
		}
	}
	return toCheck
}

func (ms *memStore) updateMetadataReferences(zidx *store.ZettelIndex, zi *zettelData) id.Set {
	// Must only be called if ms.mx is write-locked!
	inverseRefs := zidx.GetInverseRefs()
	for key, mr := range zi.otherRefs {
		if _, ok := inverseRefs[key]; ok {
			continue
		}
		ms.removeInverseMeta(zidx.Zid, key, mr.forward)
	}
	if zi.otherRefs == nil {
		zi.otherRefs = make(map[string]bidiRefs)
	}
	var toCheck id.Set
	for key, mrefs := range inverseRefs {
		mr := zi.otherRefs[key]
		newRefs, remRefs := refsDiff(mrefs, mr.forward)
		mr.forward = mrefs
		zi.otherRefs[key] = mr

		for _, ref := range newRefs {
			bzi := ms.getOrCreateEntry(ref)
			if bzi.otherRefs == nil {
				bzi.otherRefs = make(map[string]bidiRefs)
			}
			bmr := bzi.otherRefs[key]
			bmr.backward = addRef(bmr.backward, zidx.Zid)
			bzi.otherRefs[key] = bmr
			if bzi.meta == nil {
				toCheck = toCheck.Add(ref)
			}

		}
		ms.removeInverseMeta(zidx.Zid, key, remRefs)
	}
	return toCheck
}

func updateStrings(zid id.Zid, srefs stringRefs, prev []string, next store.WordSet) []string {
	newWords, removeWords := next.Diff(prev)
	for _, word := range newWords {
		if refs, ok := srefs[word]; ok {
			srefs[word] = addRef(refs, zid)
			continue
		}
		srefs[word] = id.Slice{zid}
	}
	for _, word := range removeWords {
		refs, ok := srefs[word]
		if !ok {
			continue
		}
		refs2 := remRef(refs, zid)
		if len(refs2) == 0 {
			delete(srefs, word)
			continue
		}
		srefs[word] = refs2
	}
	return next.Words()
}

func (ms *memStore) getOrCreateEntry(zid id.Zid) *zettelData {
	// Must only be called if ms.mx is write-locked!
	if zi, ok := ms.idx[zid]; ok {
		return zi
	}
	zi := &zettelData{}
	ms.idx[zid] = zi
	return zi
}

func (ms *memStore) RenameZettel(_ context.Context, curZid, newZid id.Zid) id.Set {
	ms.mx.Lock()
	defer ms.mx.Unlock()

	curZi, curFound := ms.idx[curZid]
	_, newFound := ms.idx[newZid]
	if !curFound || newFound {
		return nil
	}
	newZi := &zettelData{
		meta:      copyMeta(curZi.meta, newZid),
		dead:      ms.copyDeadReferences(curZi.dead),
		forward:   ms.copyForward(curZi.forward, newZid),
		backward:  nil, // will be done through tocheck
		otherRefs: nil, // TODO: check if this will be done through toCheck
		words:     copyStrings(ms.words, curZi.words, newZid),
		urls:      copyStrings(ms.urls, curZi.urls, newZid),
	}

	ms.idx[newZid] = newZi
	toCheck := ms.doDeleteZettel(curZid)
	toCheck = toCheck.CopySlice(ms.dead[newZid])
	delete(ms.dead, newZid)
	toCheck = toCheck.Add(newZid) // should update otherRefs
	return toCheck
}
func copyMeta(m *meta.Meta, newZid id.Zid) *meta.Meta {
	result := m.Clone()
	result.Zid = newZid
	return result
}
func (ms *memStore) copyDeadReferences(curDead id.Slice) id.Slice {
	// Must only be called if ms.mx is write-locked!
	if l := len(curDead); l > 0 {
		result := make(id.Slice, l)
		for i, ref := range curDead {
			result[i] = ref
			ms.dead[ref] = addRef(ms.dead[ref], ref)
		}
		return result
	}
	return nil
}
func (ms *memStore) copyForward(curForward id.Slice, newZid id.Zid) id.Slice {
	// Must only be called if ms.mx is write-locked!
	if l := len(curForward); l > 0 {
		result := make(id.Slice, l)
		for i, ref := range curForward {
			result[i] = ref
			if fzi, found := ms.idx[ref]; found {
				fzi.backward = addRef(fzi.backward, newZid)
			}
		}
		return result
	}
	return nil
}
func copyStrings(msStringMap stringRefs, curStrings []string, newZid id.Zid) []string {
	// Must only be called if ms.mx is write-locked!
	if l := len(curStrings); l > 0 {
		result := make([]string, l)
		for i, s := range curStrings {
			result[i] = s
			msStringMap[s] = addRef(msStringMap[s], newZid)
		}
		return result
	}
	return nil
}

func (ms *memStore) DeleteZettel(_ context.Context, zid id.Zid) id.Set {
	ms.mx.Lock()
	defer ms.mx.Unlock()
	return ms.doDeleteZettel(zid)
}

func (ms *memStore) doDeleteZettel(zid id.Zid) id.Set {
	// Must only be called if ms.mx is write-locked!
	zi, ok := ms.idx[zid]
	if !ok {
		return nil
	}

	ms.deleteDeadSources(zid, zi)
	toCheck := ms.deleteForwardBackward(zid, zi)
	for key, mrefs := range zi.otherRefs {
		ms.removeInverseMeta(zid, key, mrefs.forward)
	}
	deleteStrings(ms.words, zi.words, zid)
	deleteStrings(ms.urls, zi.urls, zid)
	delete(ms.idx, zid)
	return toCheck
}

func (ms *memStore) deleteDeadSources(zid id.Zid, zi *zettelData) {
	// Must only be called if ms.mx is write-locked!
	for _, ref := range zi.dead {
		if drefs, ok := ms.dead[ref]; ok {
			drefs = remRef(drefs, zid)
			if len(drefs) > 0 {
				ms.dead[ref] = drefs
			} else {
				delete(ms.dead, ref)
			}
		}
	}
}

func (ms *memStore) deleteForwardBackward(zid id.Zid, zi *zettelData) id.Set {
	// Must only be called if ms.mx is write-locked!
	for _, ref := range zi.forward {
		if fzi, ok := ms.idx[ref]; ok {
			fzi.backward = remRef(fzi.backward, zid)
		}

	}
	var toCheck id.Set
	for _, ref := range zi.backward {
		if bzi, ok := ms.idx[ref]; ok {
			bzi.forward = remRef(bzi.forward, zid)
			toCheck = toCheck.Add(ref)
		}
	}
	return toCheck
}

func (ms *memStore) removeInverseMeta(zid id.Zid, key string, forward id.Slice) {
	// Must only be called if ms.mx is write-locked!
	for _, ref := range forward {
		bzi, ok := ms.idx[ref]
		if !ok || bzi.otherRefs == nil {
			continue
		}
		bmr, ok := bzi.otherRefs[key]
		if !ok {
			continue
		}
		bmr.backward = remRef(bmr.backward, zid)
		if len(bmr.backward) > 0 || len(bmr.forward) > 0 {
			bzi.otherRefs[key] = bmr
		} else {
			delete(bzi.otherRefs, key)
			if len(bzi.otherRefs) == 0 {
				bzi.otherRefs = nil
			}
		}
	}
}

func deleteStrings(msStringMap stringRefs, curStrings []string, zid id.Zid) {
	// Must only be called if ms.mx is write-locked!
	for _, word := range curStrings {
		refs, ok := msStringMap[word]
		if !ok {
			continue
		}
		refs2 := remRef(refs, zid)
		if len(refs2) == 0 {
			delete(msStringMap, word)
			continue
		}
		msStringMap[word] = refs2
















	}
}

func (ms *memStore) ReadStats(st *store.Stats) {
	ms.mx.RLock()
	st.Zettel = len(ms.idx)
	st.Words = uint64(len(ms.words))
	st.Urls = uint64(len(ms.urls))
	ms.mx.RUnlock()
	ms.mxStats.Lock()
	st.Updates = ms.updates
	ms.mxStats.Unlock()
}

func (ms *memStore) Dump(w io.Writer) {
	ms.mx.RLock()
	defer ms.mx.RUnlock()

	io.WriteString(w, "=== Dump\n")
	ms.dumpIndex(w)
	ms.dumpDead(w)
	dumpStringRefs(w, "Words", "", "", ms.words)
	dumpStringRefs(w, "URLs", "[[", "]]", ms.urls)
}

func (ms *memStore) dumpIndex(w io.Writer) {
	if len(ms.idx) == 0 {
		return
	}
	io.WriteString(w, "==== Zettel Index\n")
	zids := make(id.Slice, 0, len(ms.idx))
	for id := range ms.idx {
		zids = append(zids, id)
	}
	zids.Sort()
	for _, id := range zids {
		fmt.Fprintln(w, "=====", id)
		zi := ms.idx[id]
		if len(zi.dead) > 0 {
			fmt.Fprintln(w, "* Dead:", zi.dead)
		}
		dumpZids(w, "* Forward:", zi.forward)
		dumpZids(w, "* Backward:", zi.backward)

		otherRefs := make([]string, 0, len(zi.otherRefs))
		for k := range zi.otherRefs {
			otherRefs = append(otherRefs, k)
		}
		slices.Sort(otherRefs)
		for _, k := range otherRefs {
			fmt.Fprintln(w, "* Meta", k)
			dumpZids(w, "** Forward:", zi.otherRefs[k].forward)
			dumpZids(w, "** Backward:", zi.otherRefs[k].backward)
		}
		dumpStrings(w, "* Words", "", "", zi.words)
		dumpStrings(w, "* URLs", "[[", "]]", zi.urls)
	}
}

func (ms *memStore) dumpDead(w io.Writer) {
	if len(ms.dead) == 0 {
		return
	}
	fmt.Fprintf(w, "==== Dead References\n")
	zids := make(id.Slice, 0, len(ms.dead))
	for id := range ms.dead {
		zids = append(zids, id)
	}
	zids.Sort()
	for _, id := range zids {
		fmt.Fprintln(w, ";", id)
		fmt.Fprintln(w, ":", ms.dead[id])
	}
}

func dumpZids(w io.Writer, prefix string, zids id.Slice) {
	if len(zids) > 0 {
		io.WriteString(w, prefix)
		for _, zid := range zids {
			io.WriteString(w, " ")
			w.Write(zid.Bytes())
		}
		fmt.Fprintln(w)
	}
}

func dumpStrings(w io.Writer, title, preString, postString string, slice []string) {
	if len(slice) > 0 {
		sl := make([]string, len(slice))
		copy(sl, slice)
		slices.Sort(sl)
		fmt.Fprintln(w, title)
		for _, s := range sl {

New version (lines 179-720):
		minZid, err = id.Parse(prefix + "00000000000000"[:14-l])
		if err != nil {
			return result
		}
	}
	for zid, zi := range ms.idx {
		if minZid <= zid && zid <= maxZid {
			result = addBackwardZids(result, zid, zi)
		}
	}
	return result
}

// SearchSuffix returns all zettel that have a word with the given suffix.
// The suffix must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *mapStore) SearchSuffix(suffix string) *id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := ms.selectWithPred(suffix, strings.HasSuffix)
	l := len(suffix)
	if l > 14 {
		return result
	}
	val, err := id.ParseUint(suffix)
	if err != nil {
		return result
	}
	modulo := uint64(1)
	for range l {
		modulo *= 10
	}
	for zid, zi := range ms.idx {
		if uint64(zid)%modulo == val {
			result = addBackwardZids(result, zid, zi)
		}
	}
	return result
}

// SearchContains returns all zettel that contains the given string.
// The string must be normalized through Unicode NKFD, trimmed and not empty.
func (ms *mapStore) SearchContains(s string) *id.Set {
	ms.mx.RLock()
	defer ms.mx.RUnlock()
	result := ms.selectWithPred(s, strings.Contains)
	if len(s) > 14 {
		return result
	}
	if _, err := id.ParseUint(s); err != nil {
		return result
	}
	for zid, zi := range ms.idx {
		if strings.Contains(zid.String(), s) {
			result = addBackwardZids(result, zid, zi)
		}
	}
	return result
}

func (ms *mapStore) selectWithPred(s string, pred func(string, string) bool) *id.Set {
	// Must only be called if ms.mx is read-locked!
	result := id.NewSet()
	for word, refs := range ms.words {
		if !pred(word, s) {
			continue
		}
		result.IUnion(refs)
	}
	for u, refs := range ms.urls {
		if !pred(u, s) {
			continue
		}
		result.IUnion(refs)
	}
	return result
}

func addBackwardZids(result *id.Set, zid id.Zid, zi *zettelData) *id.Set {
	// Must only be called if ms.mx is read-locked!
	result = result.Add(zid)
	result = result.IUnion(zi.backward)
	for _, mref := range zi.otherRefs {
		result = result.IUnion(mref.backward)
	}
	return result
}










func removeOtherMetaRefs(m *meta.Meta, back *id.Set) *id.Set {
	for _, p := range m.PairsRest() {
		switch meta.Type(p.Key) {
		case meta.TypeID:
			if zid, err := id.Parse(p.Value); err == nil {
				back = back.Remove(zid)
			}
		case meta.TypeIDSet:
			for _, val := range meta.ListFromValue(p.Value) {
				if zid, err := id.Parse(val); err == nil {
					back = back.Remove(zid)
				}
			}
		}
	}
	return back
}

func (ms *mapStore) UpdateReferences(_ context.Context, zidx *store.ZettelIndex) *id.Set {
	ms.mx.Lock()
	defer ms.mx.Unlock()
	m := ms.makeMeta(zidx)
	zi, ziExist := ms.idx[zidx.Zid]
	if !ziExist || zi == nil {
		zi = &zettelData{}
		ziExist = false
	}

	// Is this zettel an old dead reference mentioned in other zettel?
	var toCheck *id.Set
	if refs, ok := ms.dead[zidx.Zid]; ok {
		// These must be checked later again
		toCheck = refs
		delete(ms.dead, zidx.Zid)
	}

	zi.meta = m
	ms.updateDeadReferences(zidx, zi)
	ids := ms.updateForwardBackwardReferences(zidx, zi)
	toCheck = toCheck.IUnion(ids)
	ids = ms.updateMetadataReferences(zidx, zi)
	toCheck = toCheck.IUnion(ids)
	zi.words = updateStrings(zidx.Zid, ms.words, zi.words, zidx.GetWords())
	zi.urls = updateStrings(zidx.Zid, ms.urls, zi.urls, zidx.GetUrls())

	// Check if zi must be inserted into ms.idx
	if !ziExist {
		ms.idx[zidx.Zid] = zi
	}
	zi.optimize()
	return toCheck
}

var internableKeys = map[string]bool{
	api.KeyRole:      true,
	api.KeySyntax:    true,
	api.KeyFolgeRole: true,
	api.KeyLang:      true,
	api.KeyReadOnly:  true,
}

func isInternableValue(key string) bool {
	if internableKeys[key] {
		return true
	}
	return strings.HasSuffix(key, meta.SuffixKeyRole)
}

func (ms *mapStore) internString(s string) string {
	if is, found := ms.intern[s]; found {
		return is
	}
	ms.intern[s] = s
	return s
}

func (ms *mapStore) makeMeta(zidx *store.ZettelIndex) *meta.Meta {
	origM := zidx.GetMeta()
	copyM := meta.New(origM.Zid)
	for _, p := range origM.Pairs() {
		key := ms.internString(p.Key)
		if isInternableValue(key) {
			copyM.Set(key, ms.internString(p.Value))
		} else if key == api.KeyBoxNumber || !meta.IsComputed(key) {
			copyM.Set(key, p.Value)
		}
	}
	return copyM
}

func (ms *mapStore) updateDeadReferences(zidx *store.ZettelIndex, zi *zettelData) {
	// Must only be called if ms.mx is write-locked!
	drefs := zidx.GetDeadRefs()
	newRefs, remRefs := zi.dead.Diff(drefs)
	zi.dead = drefs
	remRefs.ForEach(func(ref id.Zid) {
		ms.dead[ref] = ms.dead[ref].Remove(zidx.Zid)
	})
	newRefs.ForEach(func(ref id.Zid) {
		ms.dead[ref] = ms.dead[ref].Add(zidx.Zid)
	})
}

func (ms *mapStore) updateForwardBackwardReferences(zidx *store.ZettelIndex, zi *zettelData) *id.Set {
	// Must only be called if ms.mx is write-locked!
	brefs := zidx.GetBackRefs()
	newRefs, remRefs := zi.forward.Diff(brefs)
	// newRefs, remRefs := refsDiff(brefs, zi.forward)
	zi.forward = brefs

	var toCheck *id.Set
	remRefs.ForEach(func(ref id.Zid) {
		bzi := ms.getOrCreateEntry(ref)
		bzi.backward = bzi.backward.Remove(zidx.Zid)
		if bzi.meta == nil {
			toCheck = toCheck.Add(ref)
		}
	})
	newRefs.ForEach(func(ref id.Zid) {
		bzi := ms.getOrCreateEntry(ref)
		bzi.backward = bzi.backward.Add(zidx.Zid)
		if bzi.meta == nil {
			toCheck = toCheck.Add(ref)
		}
	})
	return toCheck
}

func (ms *mapStore) updateMetadataReferences(zidx *store.ZettelIndex, zi *zettelData) *id.Set {
	// Must only be called if ms.mx is write-locked!
	inverseRefs := zidx.GetInverseRefs()
	for key, mr := range zi.otherRefs {
		if _, ok := inverseRefs[key]; ok {
			continue
		}
		ms.removeInverseMeta(zidx.Zid, key, mr.forward)
	}
	if zi.otherRefs == nil {
		zi.otherRefs = make(map[string]bidiRefs)
	}
	var toCheck *id.Set
	for key, mrefs := range inverseRefs {
		mr := zi.otherRefs[key]
		newRefs, remRefs := mr.forward.Diff(mrefs)
		mr.forward = mrefs
		zi.otherRefs[key] = mr

		newRefs.ForEach(func(ref id.Zid) {
			bzi := ms.getOrCreateEntry(ref)
			if bzi.otherRefs == nil {
				bzi.otherRefs = make(map[string]bidiRefs)
			}
			bmr := bzi.otherRefs[key]
			bmr.backward = bmr.backward.Add(zidx.Zid)
			bzi.otherRefs[key] = bmr
			if bzi.meta == nil {
				toCheck = toCheck.Add(ref)
			}
		})

		ms.removeInverseMeta(zidx.Zid, key, remRefs)
	}
	return toCheck
}

func updateStrings(zid id.Zid, srefs stringRefs, prev []string, next store.WordSet) []string {
	newWords, removeWords := next.Diff(prev)
	for _, word := range newWords {




		srefs[word] = srefs[word].Add(zid)
	}
	for _, word := range removeWords {
		refs, ok := srefs[word]
		if !ok {
			continue
		}
		refs = refs.Remove(zid)
		if refs.IsEmpty() {
			delete(srefs, word)
			continue
		}
		srefs[word] = refs
	}
	return next.Words()
}
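// Illustrative sketch, not part of this check-in: updateStrings applies a
// WordSet diff to the inverted index. Assuming next contains {"foo", "bar"}
// and prev == []string{"bar", "baz"}, next.Diff(prev) yields
// newWords == ["foo"] and removeWords == ["baz"]; zid is then added to
// srefs["foo"] and removed from srefs["baz"], which is deleted once empty.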

func (ms *mapStore) getOrCreateEntry(zid id.Zid) *zettelData {
	// Must only be called if ms.mx is write-locked!
	if zi, ok := ms.idx[zid]; ok {
		return zi
	}
	zi := &zettelData{}
	ms.idx[zid] = zi
	return zi
}

func (ms *mapStore) RenameZettel(_ context.Context, curZid, newZid id.Zid) *id.Set {
	ms.mx.Lock()
	defer ms.mx.Unlock()

	curZi, curFound := ms.idx[curZid]
	_, newFound := ms.idx[newZid]
	if !curFound || newFound {
		return nil
	}
	newZi := &zettelData{
		meta:      copyMeta(curZi.meta, newZid),
		dead:      ms.copyDeadReferences(curZi.dead),
		forward:   ms.copyForward(curZi.forward, newZid),
		backward:  nil, // will be done through toCheck
		otherRefs: nil, // TODO: check if this will be done through toCheck
		words:     copyStrings(ms.words, curZi.words, newZid),
		urls:      copyStrings(ms.urls, curZi.urls, newZid),
	}

	ms.idx[newZid] = newZi
	toCheck := ms.doDeleteZettel(curZid)
	toCheck = toCheck.IUnion(ms.dead[newZid])
	delete(ms.dead, newZid)
	toCheck = toCheck.Add(newZid) // should update otherRefs
	return toCheck
}
func copyMeta(m *meta.Meta, newZid id.Zid) *meta.Meta {
	result := m.Clone()
	result.Zid = newZid
	return result
}
func (ms *mapStore) copyDeadReferences(curDead *id.Set) *id.Set {
	// Must only be called if ms.mx is write-locked!
	curDead.ForEach(func(ref id.Zid) {
		ms.dead[ref] = ms.dead[ref].Add(ref)
	})
	return curDead.Clone()
}
func (ms *mapStore) copyForward(curForward *id.Set, newZid id.Zid) *id.Set {
	// Must only be called if ms.mx is write-locked!
	curForward.ForEach(func(ref id.Zid) {
		if fzi, found := ms.idx[ref]; found {
			fzi.backward = fzi.backward.Add(newZid)
		}
	})
	return curForward.Clone()
}
func copyStrings(msStringMap stringRefs, curStrings []string, newZid id.Zid) []string {
	// Must only be called if ms.mx is write-locked!
	if l := len(curStrings); l > 0 {
		result := make([]string, l)
		for i, s := range curStrings {
			result[i] = s
			msStringMap[s] = msStringMap[s].Add(newZid)
		}
		return result
	}
	return nil
}

func (ms *mapStore) DeleteZettel(_ context.Context, zid id.Zid) *id.Set {
	ms.mx.Lock()
	defer ms.mx.Unlock()
	return ms.doDeleteZettel(zid)
}

func (ms *mapStore) doDeleteZettel(zid id.Zid) *id.Set {
	// Must only be called if ms.mx is write-locked!
	zi, ok := ms.idx[zid]
	if !ok {
		return nil
	}

	ms.deleteDeadSources(zid, zi)
	toCheck := ms.deleteForwardBackward(zid, zi)
	for key, mrefs := range zi.otherRefs {
		ms.removeInverseMeta(zid, key, mrefs.forward)
	}
	deleteStrings(ms.words, zi.words, zid)
	deleteStrings(ms.urls, zi.urls, zid)
	delete(ms.idx, zid)
	return toCheck
}
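// Illustrative sketch, not part of this check-in: a caller of DeleteZettel
// re-checks the returned set, which may be nil (treated as empty):
//
//	if toCheck := ms.DeleteZettel(ctx, zid); !toCheck.IsEmpty() {
//		// schedule every zettel in toCheck for re-indexing
//	}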

func (ms *mapStore) deleteDeadSources(zid id.Zid, zi *zettelData) {
	// Must only be called if ms.mx is write-locked!
	zi.dead.ForEach(func(ref id.Zid) {
		if drefs, ok := ms.dead[ref]; ok {
			if drefs = drefs.Remove(zid); drefs.IsEmpty() {
				delete(ms.dead, ref)
			} else {
				ms.dead[ref] = drefs
			}
		}
	})
}

func (ms *mapStore) deleteForwardBackward(zid id.Zid, zi *zettelData) *id.Set {
	// Must only be called if ms.mx is write-locked!
	zi.forward.ForEach(func(ref id.Zid) {
		if fzi, ok := ms.idx[ref]; ok {
			fzi.backward = fzi.backward.Remove(zid)
		}
	})

	var toCheck *id.Set
	zi.backward.ForEach(func(ref id.Zid) {
		if bzi, ok := ms.idx[ref]; ok {
			bzi.forward = bzi.forward.Remove(zid)
			toCheck = toCheck.Add(ref)
		}
	})
	return toCheck
}

func (ms *mapStore) removeInverseMeta(zid id.Zid, key string, forward *id.Set) {
	// Must only be called if ms.mx is write-locked!
	forward.ForEach(func(ref id.Zid) {
		bzi, ok := ms.idx[ref]
		if !ok || bzi.otherRefs == nil {
			return
		}
		bmr, ok := bzi.otherRefs[key]
		if !ok {
			return
		}
		bmr.backward = bmr.backward.Remove(zid)
		if !bmr.backward.IsEmpty() || !bmr.forward.IsEmpty() {
			bzi.otherRefs[key] = bmr
		} else {
			delete(bzi.otherRefs, key)
			if len(bzi.otherRefs) == 0 {
				bzi.otherRefs = nil
			}
		}
	})
}

func deleteStrings(msStringMap stringRefs, curStrings []string, zid id.Zid) {
	// Must only be called if ms.mx is write-locked!
	for _, word := range curStrings {
		refs, ok := msStringMap[word]
		if !ok {
			continue
		}
		refs = refs.Remove(zid)
		if refs.IsEmpty() {
			delete(msStringMap, word)
			continue
		}
		msStringMap[word] = refs
	}
}

func (ms *mapStore) Optimize() {
	ms.mx.Lock()
	defer ms.mx.Unlock()

	// No need to optimize ms.idx: is already done via ms.UpdateReferences
	for _, dead := range ms.dead {
		dead.Optimize()
	}
	for _, s := range ms.words {
		s.Optimize()
	}
	for _, s := range ms.urls {
		s.Optimize()
	}
}

func (ms *mapStore) ReadStats(st *store.Stats) {
	ms.mx.RLock()
	st.Zettel = len(ms.idx)
	st.Words = uint64(len(ms.words))
	st.Urls = uint64(len(ms.urls))
	ms.mx.RUnlock()
	ms.mxStats.Lock()
	st.Updates = ms.updates
	ms.mxStats.Unlock()
}

func (ms *mapStore) Dump(w io.Writer) {
	ms.mx.RLock()
	defer ms.mx.RUnlock()

	io.WriteString(w, "=== Dump\n")
	ms.dumpIndex(w)
	ms.dumpDead(w)
	dumpStringRefs(w, "Words", "", "", ms.words)
	dumpStringRefs(w, "URLs", "[[", "]]", ms.urls)
}

func (ms *mapStore) dumpIndex(w io.Writer) {
	if len(ms.idx) == 0 {
		return
	}
	io.WriteString(w, "==== Zettel Index\n")
	zids := make(id.Slice, 0, len(ms.idx))
	for id := range ms.idx {
		zids = append(zids, id)
	}
	zids.Sort()
	for _, id := range zids {
		fmt.Fprintln(w, "=====", id)
		zi := ms.idx[id]
		if !zi.dead.IsEmpty() {
			fmt.Fprintln(w, "* Dead:", zi.dead)
		}
		dumpSet(w, "* Forward:", zi.forward)
		dumpSet(w, "* Backward:", zi.backward)

		otherRefs := make([]string, 0, len(zi.otherRefs))
		for k := range zi.otherRefs {
			otherRefs = append(otherRefs, k)
		}
		slices.Sort(otherRefs)
		for _, k := range otherRefs {
			fmt.Fprintln(w, "* Meta", k)
			dumpSet(w, "** Forward:", zi.otherRefs[k].forward)
			dumpSet(w, "** Backward:", zi.otherRefs[k].backward)
		}
		dumpStrings(w, "* Words", "", "", zi.words)
		dumpStrings(w, "* URLs", "[[", "]]", zi.urls)
	}
}

func (ms *mapStore) dumpDead(w io.Writer) {
	if len(ms.dead) == 0 {
		return
	}
	fmt.Fprintf(w, "==== Dead References\n")
	zids := make(id.Slice, 0, len(ms.dead))
	for id := range ms.dead {
		zids = append(zids, id)
	}
	zids.Sort()
	for _, id := range zids {
		fmt.Fprintln(w, ";", id)
		fmt.Fprintln(w, ":", ms.dead[id])
	}
}

func dumpSet(w io.Writer, prefix string, s *id.Set) {
	if !s.IsEmpty() {
		io.WriteString(w, prefix)
		s.ForEach(func(zid id.Zid) {
			io.WriteString(w, " ")
			w.Write(zid.Bytes())
		})
		fmt.Fprintln(w)
	}
}

func dumpStrings(w io.Writer, title, preString, postString string, slice []string) {
	if len(slice) > 0 {
		sl := make([]string, len(slice))
		copy(sl, slice)
		slices.Sort(sl)
		fmt.Fprintln(w, title)
		for _, s := range sl {

Deleted box/manager/mapstore/refs.go.

//-----------------------------------------------------------------------------
// Copyright (c) 2021-present Detlef Stern
//
// This file is part of Zettelstore.
//
// Zettelstore is licensed under the latest version of the EUPL (European Union
// Public License). Please see file LICENSE.txt for your rights and obligations
// under this license.
//
// SPDX-License-Identifier: EUPL-1.2
// SPDX-FileCopyrightText: 2021-present Detlef Stern
//-----------------------------------------------------------------------------

package mapstore

import (
	"slices"

	"zettelstore.de/z/zettel/id"
)

func refsDiff(refsN, refsO id.Slice) (newRefs, remRefs id.Slice) {
	npos, opos := 0, 0
	for npos < len(refsN) && opos < len(refsO) {
		rn, ro := refsN[npos], refsO[opos]
		if rn == ro {
			npos++
			opos++
			continue
		}
		if rn < ro {
			newRefs = append(newRefs, rn)
			npos++
			continue
		}
		remRefs = append(remRefs, ro)
		opos++
	}
	if npos < len(refsN) {
		newRefs = append(newRefs, refsN[npos:]...)
	}
	if opos < len(refsO) {
		remRefs = append(remRefs, refsO[opos:]...)
	}
	return newRefs, remRefs
}

func addRef(refs id.Slice, ref id.Zid) id.Slice {
	hi := len(refs)
	for lo := 0; lo < hi; {
		m := lo + (hi-lo)/2
		if r := refs[m]; r == ref {
			return refs
		} else if r < ref {
			lo = m + 1
		} else {
			hi = m
		}
	}
	refs = slices.Insert(refs, hi, ref)
	return refs
}

func remRefs(refs, rem id.Slice) id.Slice {
	if len(refs) == 0 || len(rem) == 0 {
		return refs
	}
	result := make(id.Slice, 0, len(refs))
	rpos, dpos := 0, 0
	for rpos < len(refs) && dpos < len(rem) {
		rr, dr := refs[rpos], rem[dpos]
		if rr < dr {
			result = append(result, rr)
			rpos++
			continue
		}
		if dr < rr {
			dpos++
			continue
		}
		rpos++
		dpos++
	}
	if rpos < len(refs) {
		result = append(result, refs[rpos:]...)
	}
	return result
}

func remRef(refs id.Slice, ref id.Zid) id.Slice {
	hi := len(refs)
	for lo := 0; lo < hi; {
		m := lo + (hi-lo)/2
		if r := refs[m]; r == ref {
			copy(refs[m:], refs[m+1:])
			refs = refs[:len(refs)-1]
			return refs
		} else if r < ref {
			lo = m + 1
		} else {
			hi = m
		}
	}
	return refs
}
Deleted box/manager/mapstore/refs_test.go.

//-----------------------------------------------------------------------------
// Copyright (c) 2021-present Detlef Stern
//
// This file is part of Zettelstore.
//
// Zettelstore is licensed under the latest version of the EUPL (European Union
// Public License). Please see file LICENSE.txt for your rights and obligations
// under this license.
//
// SPDX-License-Identifier: EUPL-1.2
// SPDX-FileCopyrightText: 2021-present Detlef Stern
//-----------------------------------------------------------------------------

package mapstore

import (
	"testing"

	"zettelstore.de/z/zettel/id"
)

func assertRefs(t *testing.T, i int, got, exp id.Slice) {
	t.Helper()
	if got == nil && exp != nil {
		t.Errorf("%d: got nil, but expected %v", i, exp)
		return
	}
	if got != nil && exp == nil {
		t.Errorf("%d: expected nil, but got %v", i, got)
		return
	}
	if len(got) != len(exp) {
		t.Errorf("%d: expected len(%v)==%d, but got len(%v)==%d", i, exp, len(exp), got, len(got))
		return
	}
	for p, n := range exp {
		if got := got[p]; got != id.Zid(n) {
			t.Errorf("%d: pos %d: expected %d, but got %d", i, p, n, got)
		}
	}
}

func TestRefsDiff(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		in1, in2   id.Slice
		exp1, exp2 id.Slice
	}{
		{nil, nil, nil, nil},
		{id.Slice{1}, nil, id.Slice{1}, nil},
		{nil, id.Slice{1}, nil, id.Slice{1}},
		{id.Slice{1}, id.Slice{1}, nil, nil},
		{id.Slice{1, 2}, id.Slice{1}, id.Slice{2}, nil},
		{id.Slice{1, 2}, id.Slice{1, 3}, id.Slice{2}, id.Slice{3}},
		{id.Slice{1, 4}, id.Slice{1, 3}, id.Slice{4}, id.Slice{3}},
	}
	for i, tc := range testcases {
		got1, got2 := refsDiff(tc.in1, tc.in2)
		assertRefs(t, i, got1, tc.exp1)
		assertRefs(t, i, got2, tc.exp2)
	}
}

func TestAddRef(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		ref id.Slice
		zid uint
		exp id.Slice
	}{
		{nil, 5, id.Slice{5}},
		{id.Slice{1}, 5, id.Slice{1, 5}},
		{id.Slice{10}, 5, id.Slice{5, 10}},
		{id.Slice{5}, 5, id.Slice{5}},
		{id.Slice{1, 10}, 5, id.Slice{1, 5, 10}},
		{id.Slice{1, 5, 10}, 5, id.Slice{1, 5, 10}},
	}
	for i, tc := range testcases {
		got := addRef(tc.ref, id.Zid(tc.zid))
		assertRefs(t, i, got, tc.exp)
	}
}

func TestRemRefs(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		in1, in2 id.Slice
		exp      id.Slice
	}{
		{nil, nil, nil},
		{nil, id.Slice{}, nil},
		{id.Slice{}, nil, id.Slice{}},
		{id.Slice{}, id.Slice{}, id.Slice{}},
		{id.Slice{1}, id.Slice{5}, id.Slice{1}},
		{id.Slice{10}, id.Slice{5}, id.Slice{10}},
		{id.Slice{1, 5}, id.Slice{5}, id.Slice{1}},
		{id.Slice{5, 10}, id.Slice{5}, id.Slice{10}},
		{id.Slice{1, 10}, id.Slice{5}, id.Slice{1, 10}},
		{id.Slice{1}, id.Slice{2, 5}, id.Slice{1}},
		{id.Slice{10}, id.Slice{2, 5}, id.Slice{10}},
		{id.Slice{1, 5}, id.Slice{2, 5}, id.Slice{1}},
		{id.Slice{5, 10}, id.Slice{2, 5}, id.Slice{10}},
		{id.Slice{1, 2, 5}, id.Slice{2, 5}, id.Slice{1}},
		{id.Slice{2, 5, 10}, id.Slice{2, 5}, id.Slice{10}},
		{id.Slice{1, 10}, id.Slice{2, 5}, id.Slice{1, 10}},
		{id.Slice{1}, id.Slice{5, 9}, id.Slice{1}},
		{id.Slice{10}, id.Slice{5, 9}, id.Slice{10}},
		{id.Slice{1, 5}, id.Slice{5, 9}, id.Slice{1}},
		{id.Slice{5, 10}, id.Slice{5, 9}, id.Slice{10}},
		{id.Slice{1, 5, 9}, id.Slice{5, 9}, id.Slice{1}},
		{id.Slice{5, 9, 10}, id.Slice{5, 9}, id.Slice{10}},
		{id.Slice{1, 10}, id.Slice{5, 9}, id.Slice{1, 10}},
	}
	for i, tc := range testcases {
		got := remRefs(tc.in1, tc.in2)
		assertRefs(t, i, got, tc.exp)
	}
}

func TestRemRef(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		ref id.Slice
		zid uint
		exp id.Slice
	}{
		{nil, 5, nil},
		{id.Slice{}, 5, id.Slice{}},
		{id.Slice{5}, 5, id.Slice{}},
		{id.Slice{1}, 5, id.Slice{1}},
		{id.Slice{10}, 5, id.Slice{10}},
		{id.Slice{1, 5}, 5, id.Slice{1}},
		{id.Slice{5, 10}, 5, id.Slice{10}},
		{id.Slice{1, 5, 10}, 5, id.Slice{1, 10}},
	}
	for i, tc := range testcases {
		got := remRef(tc.ref, id.Zid(tc.zid))
		assertRefs(t, i, got, tc.exp)
	}
}
Changes to box/manager/store/store.go.

	GetMeta(context.Context, id.Zid) (*meta.Meta, error)

	// Enrich metadata with data from store.
	Enrich(ctx context.Context, m *meta.Meta)

	// UpdateReferences for a specific zettel.
	// Returns set of zettel identifier that must also be checked for changes.
	UpdateReferences(context.Context, *ZettelIndex) id.Set

	// RenameZettel changes all references of current zettel identifier to new
	// zettel identifier.
	RenameZettel(_ context.Context, curZid, newZid id.Zid) id.Set

	// DeleteZettel removes index data for given zettel.
	// Returns set of zettel identifier that must also be checked for changes.
	DeleteZettel(context.Context, id.Zid) id.Set




	// ReadStats populates st with store statistics.
	ReadStats(st *Stats)

	// Dump the content to a Writer.
	Dump(io.Writer)
}
	GetMeta(context.Context, id.Zid) (*meta.Meta, error)

	// Enrich metadata with data from store.
	Enrich(ctx context.Context, m *meta.Meta)

	// UpdateReferences for a specific zettel.
	// Returns set of zettel identifier that must also be checked for changes.
	UpdateReferences(context.Context, *ZettelIndex) *id.Set

	// RenameZettel changes all references of current zettel identifier to new
	// zettel identifier.
	RenameZettel(_ context.Context, curZid, newZid id.Zid) *id.Set

	// DeleteZettel removes index data for given zettel.
	// Returns set of zettel identifier that must also be checked for changes.
	DeleteZettel(context.Context, id.Zid) *id.Set

	// Optimize removes unneeded space.
	Optimize()

	// ReadStats populates st with store statistics.
	ReadStats(st *Stats)

	// Dump the content to a Writer.
	Dump(io.Writer)
}
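// Illustrative sketch, not part of this check-in: with the interface now
// returning *id.Set, a caller can treat a nil result as the empty set,
// assuming the nil-receiver behaviour used throughout mapstore above:
//
//	toCheck := s.UpdateReferences(ctx, zidx) // may be nil
//	toCheck.ForEach(func(zid id.Zid) {
//		// re-index zid as well ...
//	})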

Changes to box/manager/store/zettel.go.

import (
	"zettelstore.de/z/zettel/id"
	"zettelstore.de/z/zettel/meta"
)

// ZettelIndex contains all index data of a zettel.
type ZettelIndex struct {
	Zid         id.Zid            // zid of the indexed zettel
	meta        *meta.Meta        // full metadata
	backrefs    id.Set            // set of back references
	inverseRefs map[string]id.Set // references of inverse keys
	deadrefs    id.Set            // set of dead references
	words       WordSet
	urls        WordSet
}

// NewZettelIndex creates a new zettel index.
func NewZettelIndex(m *meta.Meta) *ZettelIndex {
	return &ZettelIndex{
		Zid:         m.Zid,
		meta:        m,
		backrefs:    id.NewSet(),
		inverseRefs: make(map[string]id.Set),
		deadrefs:    id.NewSet(),
	}
}

// AddBackRef adds a reference to a zettel where the current zettel links to
// without any more information.
func (zi *ZettelIndex) AddBackRef(zid id.Zid) {
	zi.backrefs.Add(zid)
}

// AddInverseRef adds a named reference to a zettel. On that zettel, the given
// metadata key should point back to the current zettel.
func (zi *ZettelIndex) AddInverseRef(key string, zid id.Zid) {
	if zids, ok := zi.inverseRefs[key]; ok {
		zids.Add(zid)
		return







|
|
|
|
|










|






|
<
<







16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45


46
47
48
49
50
51
52
import (
	"zettelstore.de/z/zettel/id"
	"zettelstore.de/z/zettel/meta"
)

// ZettelIndex contains all index data of a zettel.
type ZettelIndex struct {
	Zid         id.Zid             // zid of the indexed zettel
	meta        *meta.Meta         // full metadata
	backrefs    *id.Set            // set of back references
	inverseRefs map[string]*id.Set // references of inverse keys
	deadrefs    *id.Set            // set of dead references
	words       WordSet
	urls        WordSet
}

// NewZettelIndex creates a new zettel index.
func NewZettelIndex(m *meta.Meta) *ZettelIndex {
	return &ZettelIndex{
		Zid:         m.Zid,
		meta:        m,
		backrefs:    id.NewSet(),
		inverseRefs: make(map[string]*id.Set),
		deadrefs:    id.NewSet(),
	}
}

// AddBackRef adds a reference to a zettel where the current zettel links to
// without any more information.
func (zi *ZettelIndex) AddBackRef(zid id.Zid) { zi.backrefs.Add(zid) }



// AddInverseRef adds a named reference to a zettel. On that zettel, the given
// metadata key should point back to the current zettel.
func (zi *ZettelIndex) AddInverseRef(key string, zid id.Zid) {
	if zids, ok := zi.inverseRefs[key]; ok {
		zids.Add(zid)
		return
// SetWords sets the words to the given value.
func (zi *ZettelIndex) SetWords(words WordSet) { zi.words = words }

// SetUrls sets the URLs to the given value.
func (zi *ZettelIndex) SetUrls(urls WordSet) { zi.urls = urls }

// GetDeadRefs returns all dead references as a sorted list.
func (zi *ZettelIndex) GetDeadRefs() id.Slice { return zi.deadrefs.Sorted() }

// GetMeta return just the raw metadata.
func (zi *ZettelIndex) GetMeta() *meta.Meta { return zi.meta }

// GetBackRefs returns all back references as a sorted list.
func (zi *ZettelIndex) GetBackRefs() id.Slice { return zi.backrefs.Sorted() }

// GetInverseRefs returns all inverse meta references as a map of strings to a sorted list of references
func (zi *ZettelIndex) GetInverseRefs() map[string]id.Slice {
	if len(zi.inverseRefs) == 0 {
		return nil
	}
	result := make(map[string]id.Slice, len(zi.inverseRefs))
	for key, refs := range zi.inverseRefs {
		result[key] = refs.Sorted()
	}
	return result
}

// GetWords returns a reference to the set of words. It must not be modified.
func (zi *ZettelIndex) GetWords() WordSet { return zi.words }

// GetUrls returns a reference to the set of URLs. It must not be modified.
func (zi *ZettelIndex) GetUrls() WordSet { return zi.urls }
// SetWords sets the words to the given value.
func (zi *ZettelIndex) SetWords(words WordSet) { zi.words = words }

// SetUrls sets the URLs to the given value.
func (zi *ZettelIndex) SetUrls(urls WordSet) { zi.urls = urls }

// GetDeadRefs returns the set of dead references.
func (zi *ZettelIndex) GetDeadRefs() *id.Set { return zi.deadrefs }

// GetMeta return just the raw metadata.
func (zi *ZettelIndex) GetMeta() *meta.Meta { return zi.meta }

// GetBackRefs returns the set of back references.
func (zi *ZettelIndex) GetBackRefs() *id.Set { return zi.backrefs }

// GetInverseRefs returns all inverse meta references as a map of strings to a set of references.
func (zi *ZettelIndex) GetInverseRefs() map[string]*id.Set {
	if len(zi.inverseRefs) == 0 {
		return nil
	}
	result := make(map[string]*id.Set, len(zi.inverseRefs))
	for key, refs := range zi.inverseRefs {
		result[key] = refs
	}
	return result
}

// GetWords returns a reference to the set of words. It must not be modified.
func (zi *ZettelIndex) GetWords() WordSet { return zi.words }

// GetUrls returns a reference to the set of URLs. It must not be modified.
func (zi *ZettelIndex) GetUrls() WordSet { return zi.urls }
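// Illustrative sketch, not part of this check-in: building an index entry and
// reading the pointer-based sets through the accessors above (backZid and
// otherZid are placeholder identifiers).
//
//	zidx := store.NewZettelIndex(m)
//	zidx.AddBackRef(backZid)
//	zidx.AddInverseRef("precursor", otherZid)
//	for key, refs := range zidx.GetInverseRefs() {
//		refs.ForEach(func(zid id.Zid) { /* record key -> zid */ })
//	}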

Changes to docs/manual/00001012053500.zettel.

id: 00001012053500
title: API: Retrieve evaluated metadata and content of an existing zettel in various encodings
role: manual
tags: #api #manual #zettelstore
syntax: zmk
created: 20210726174524
modified: 20230807170112

The [[endpoint|00001012920000]] to work with evaluated metadata and content of a specific zettel is ''/z/{ID}'', where ''{ID}'' is a placeholder for the [[zettel identifier|00001006050000]].

For example, to retrieve some evaluated data about this zettel you are currently viewing in [[Sz encoding|00001012920516]], just send a HTTP GET request to the endpoint ''/z/00001012053500''[^If [[authentication is enabled|00001010040100]], you must include a valid [[access token|00001012050200]] in the ''Authorization'' header] with the query parameter ''enc=sz''.
If successful, the output is a symbolic expression value:
```sh
# curl 'http://127.0.0.1:23123/z/00001012053500?enc=sz'
((PARA (TEXT "The") (SPACE) (LINK-ZETTEL () "00001012920000" (TEXT "endpoint")) (SPACE) (TEXT "to") (SPACE) (TEXT "work") (SPACE) (TEXT "with") (SPACE) (TEXT "evaluated") (SPACE) (TEXT "metadata") (SPACE) (TEXT "and") (SPACE) (TEXT "content") (SPACE) (TEXT "of") (SPACE) (TEXT "a") (SPACE) (TEXT "specific") (SPACE) (TEXT "zettel") (SPACE) (TEXT "is") (SPACE) (LITERAL-INPUT () "/z/{ID}") (TEXT ",") (SPACE) (TEXT "where") (SPACE) (LITERAL-INPUT () "{ID}") ...
```

To select another encoding, you must provide the query parameter ''enc=ENCODING''.
Others are ""[[html|00001012920510]]"", ""[[text|00001012920519]]"", and some [[more|00001012920500]].
In addition, you may provide a query parameter ''part=PART'' to select the relevant [[part|00001012920800]] of a zettel.
```sh
# curl 'http://127.0.0.1:23123/z/00001012053500?enc=html&part=zettel'
id: 00001012053500
title: API: Retrieve evaluated metadata and content of an existing zettel in various encodings
role: manual
tags: #api #manual #zettelstore
syntax: zmk
created: 20210726174524
modified: 20240620171057

The [[endpoint|00001012920000]] to work with evaluated metadata and content of a specific zettel is ''/z/{ID}'', where ''{ID}'' is a placeholder for the [[zettel identifier|00001006050000]].

For example, to retrieve some evaluated data about this zettel you are currently viewing in [[Sz encoding|00001012920516]], just send a HTTP GET request to the endpoint ''/z/00001012053500''[^If [[authentication is enabled|00001010040100]], you must include a valid [[access token|00001012050200]] in the ''Authorization'' header] with the query parameter ''enc=sz''.
If successful, the output is a symbolic expression value:
```sh
# curl 'http://127.0.0.1:23123/z/00001012053500?enc=sz'
(BLOCK (PARA (TEXT "The ") (LINK-ZETTEL () "00001012920000" (TEXT "endpoint")) (TEXT " to work with parsed metadata and content of a specific zettel is ") (LITERAL-INPUT () "/z/{ID}") (TEXT ", where ") (LITERAL-INPUT () "{ID}") (TEXT " is a placeholder for the ") ...
```

To select another encoding, you must provide the query parameter ''enc=ENCODING''.
Others are ""[[html|00001012920510]]"", ""[[text|00001012920519]]"", and some [[more|00001012920500]].
In addition, you may provide a query parameter ''part=PART'' to select the relevant [[part|00001012920800]] of a zettel.
```sh
# curl 'http://127.0.0.1:23123/z/00001012053500?enc=html&part=zettel'

Changes to docs/manual/00001012053600.zettel.

id: 00001012053600
title: API: Retrieve parsed metadata and content of an existing zettel in various encodings
role: manual
tags: #api #manual #zettelstore
syntax: zmk
created: 20210126175322
modified: 20230807170019

The [[endpoint|00001012920000]] to work with parsed metadata and content of a specific zettel is ''/z/{ID}'', where ''{ID}'' is a placeholder for the [[zettel identifier|00001006050000]].

A __parsed__ zettel is basically an [[unevaluated|00001012053500]] zettel: the zettel is read and analyzed, but its content is not __evaluated__.
By using this endpoint, you are able to retrieve the structure of a zettel before it is evaluated.

For example, to retrieve some data about this zettel you are currently viewing, just send a HTTP GET request to the endpoint ''/z/00001012053600''[^If [[authentication is enabled|00001010040100]], you must include a valid [[access token|00001012050200]] in the ''Authorization'' header] with the query parameter ''parseonly'' (and other appropriate query parameters).
For example:
```sh
# curl 'http://127.0.0.1:23123/z/00001012053600?enc=sz&parseonly'
((PARA (TEXT "The") (SPACE) (LINK-ZETTEL () "00001012920000" (TEXT "endpoint")) (SPACE) (TEXT "to") (SPACE) (TEXT "work") (SPACE) (TEXT "with") (SPACE) ...
```

Similar to [[retrieving an encoded zettel|00001012053500]], you can specify an [[encoding|00001012920500]] and state which [[part|00001012920800]] of a zettel you are interested in.
The same default values apply to this endpoint.

=== HTTP Status codes
; ''200''
id: 00001012053600
title: API: Retrieve parsed metadata and content of an existing zettel in various encodings
role: manual
tags: #api #manual #zettelstore
syntax: zmk
created: 20210126175322
modified: 20240620170909

The [[endpoint|00001012920000]] to work with parsed metadata and content of a specific zettel is ''/z/{ID}'', where ''{ID}'' is a placeholder for the [[zettel identifier|00001006050000]].

A __parsed__ zettel is basically an [[unevaluated|00001012053500]] zettel: the zettel is read and analyzed, but its content is not __evaluated__.
By using this endpoint, you are able to retrieve the structure of a zettel before it is evaluated.

For example, to retrieve some data about this zettel you are currently viewing, just send a HTTP GET request to the endpoint ''/z/00001012053600''[^If [[authentication is enabled|00001010040100]], you must include a valid [[access token|00001012050200]] in the ''Authorization'' header] with the query parameter ''parseonly'' (and other appropriate query parameters).
For example:
```sh
# curl 'http://127.0.0.1:23123/z/00001012053600?enc=sz&parseonly'
(BLOCK (PARA (TEXT "The ") (LINK-ZETTEL () "00001012920000" (TEXT "endpoint")) (TEXT " to work with parsed metadata and content of a specific zettel is ") (LITERAL-INPUT () "/z/{ID}") (TEXT ", where ") ...
```

Similar to [[retrieving an encoded zettel|00001012053500]], you can specify an [[encoding|00001012920500]] and state which [[part|00001012920800]] of a zettel you are interested in.
The same default values apply to this endpoint.

=== HTTP Status codes
; ''200''

Changes to docs/manual/00001012931600.zettel.

id: 00001012931600
title: Encoding of Sz Inline Elements
role: manual
tags: #api #manual #reference #zettelstore
syntax: zmk
created: 20230403161845
modified: 20240122122448

=== ''TEXT''
:::syntax
__Text__ **=** ''(TEXT'' String '')''.
:::
Specifies the string as some text content, typically a word.

=== ''SPACE''
:::syntax
__Space__ **=** ''(SPACE'' **[** String **]** '')''.
:::
Specifies some space, typically white space.
If the string is not given it is assumed to be ''" "'' (one space character).
Otherwise it contains the space characters.

=== ''SOFT''
:::syntax
__Soft__ **=** ''(SOFT)''.
:::
Denotes a soft line break.
It is typically translated into a space character, but signals the point in the textual content, where a line break occurred.

=== ''HARD''
:::syntax
__Hard__ **=** ''(HARD)''.
:::
Specifies a hard line break, i.e. the user wants to have a line break here.
id: 00001012931600
title: Encoding of Sz Inline Elements
role: manual
tags: #api #manual #reference #zettelstore
syntax: zmk
created: 20230403161845
modified: 20240620170546

=== ''TEXT''
:::syntax
__Text__ **=** ''(TEXT'' String '')''.
:::
Specifies the string as some text content, including white space characters.









=== ''SOFT''
:::syntax
__Soft__ **=** ''(SOFT)''.
:::
Denotes a soft line break.
It will often be encoded as a space character, but signals the point in the textual content where a line break occurred.

=== ''HARD''
:::syntax
__Hard__ **=** ''(HARD)''.
:::
Specifies a hard line break, i.e. the user wants to have a line break here.

Changes to encoder/encoder_block_test.go.

	},
	{
		descr: "Simple text: Hello, world",
		zmk:   "Hello, world",
		expect: expectMap{
			encoderHTML:  "<p>Hello, world</p>",
			encoderMD:    "Hello, world",
			encoderSz:    `(BLOCK (PARA (TEXT "Hello,") (SPACE) (TEXT "world")))`,
			encoderSHTML: `((p "Hello," " " "world"))`,
			encoderText:  "Hello, world",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Simple block comment",
		zmk:   "%%%\nNo\nrender\n%%%",
	},
	{
		descr: "Simple text: Hello, world",
		zmk:   "Hello, world",
		expect: expectMap{
			encoderHTML:  "<p>Hello, world</p>",
			encoderMD:    "Hello, world",
			encoderSz:    `(BLOCK (PARA (TEXT "Hello, world")))`,
			encoderSHTML: `((p "Hello, world"))`,
			encoderText:  "Hello, world",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Simple block comment",
		zmk:   "%%%\nNo\nrender\n%%%",
	},
	{
		descr: "Simple Heading",
		zmk:   `=== Top Job`,
		expect: expectMap{
			encoderHTML:  "<h2 id=\"top-job\">Top Job</h2>",
			encoderMD:    "# Top Job",
			encoderSz:    `(BLOCK (HEADING 1 () "top-job" "top-job" (TEXT "Top") (SPACE) (TEXT "Job")))`,
			encoderSHTML: `((h2 (@ (id . "top-job")) "Top" " " "Job"))`,
			encoderText:  `Top Job`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Simple List",
		zmk:   "* A\n* B\n* C",
	},
	{
		descr: "Simple Heading",
		zmk:   `=== Top Job`,
		expect: expectMap{
			encoderHTML:  "<h2 id=\"top-job\">Top Job</h2>",
			encoderMD:    "# Top Job",
			encoderSz:    `(BLOCK (HEADING 1 () "top-job" "top-job" (TEXT "Top Job")))`,
			encoderSHTML: `((h2 (@ (id . "top-job")) "Top Job"))`,
			encoderText:  `Top Job`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Simple List",
		zmk:   "* A\n* B\n* C",
	},
	{
		descr: "Simple Quote Block",
		zmk:   "<<<\nToBeOrNotToBe\n<<< Romeo Julia",
		expect: expectMap{
			encoderHTML:  "<blockquote><p>ToBeOrNotToBe</p><cite>Romeo Julia</cite></blockquote>",
			encoderMD:    "> ToBeOrNotToBe",
			encoderSz:    `(BLOCK (REGION-QUOTE () ((PARA (TEXT "ToBeOrNotToBe"))) (TEXT "Romeo") (SPACE) (TEXT "Julia")))`,
			encoderSHTML: `((blockquote (p "ToBeOrNotToBe") (cite "Romeo" " " "Julia")))`,
			encoderText:  "ToBeOrNotToBe\nRomeo Julia",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Quote Block with multiple paragraphs",
		zmk:   "<<<\nToBeOr\n\nNotToBe\n<<< Romeo",
	},
	{
		descr: "Simple Quote Block",
		zmk:   "<<<\nToBeOrNotToBe\n<<< Romeo Julia",
		expect: expectMap{
			encoderHTML:  "<blockquote><p>ToBeOrNotToBe</p><cite>Romeo Julia</cite></blockquote>",
			encoderMD:    "> ToBeOrNotToBe",
			encoderSz:    `(BLOCK (REGION-QUOTE () ((PARA (TEXT "ToBeOrNotToBe"))) (TEXT "Romeo Julia")))`,
			encoderSHTML: `((blockquote (p "ToBeOrNotToBe") (cite "Romeo Julia")))`,
			encoderText:  "ToBeOrNotToBe\nRomeo Julia",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Quote Block with multiple paragraphs",
		zmk:   "<<<\nToBeOr\n\nNotToBe\n<<< Romeo",
Paragraph

    Spacy  Para
""" Author`,
		expect: expectMap{
			encoderHTML:  "<div><p>A\u00a0line<br>\u00a0\u00a0another\u00a0line<br>Back</p><p>Paragraph</p><p>\u00a0\u00a0\u00a0\u00a0Spacy\u00a0\u00a0Para</p><cite>Author</cite></div>",
			encoderMD:    "",
			encoderSz:    "(BLOCK (REGION-VERSE () ((PARA (TEXT \"A\") (SPACE \"\u00a0\") (TEXT \"line\") (HARD) (SPACE \"\u00a0\u00a0\") (TEXT \"another\") (SPACE \"\u00a0\") (TEXT \"line\") (HARD) (TEXT \"Back\")) (PARA (TEXT \"Paragraph\")) (PARA (SPACE \"\u00a0\u00a0\u00a0\u00a0\") (TEXT \"Spacy\") (SPACE \"\u00a0\u00a0\") (TEXT \"Para\"))) (TEXT \"Author\")))",
			encoderSHTML: "((div (p \"A\" \"\u00a0\" \"line\" (br) \"\u00a0\u00a0\" \"another\" \"\u00a0\" \"line\" (br) \"Back\") (p \"Paragraph\") (p \"\u00a0\u00a0\u00a0\u00a0\" \"Spacy\" \"\u00a0\u00a0\" \"Para\") (cite \"Author\")))",
			encoderText:  "A line\n another line\nBack\nParagraph\n Spacy Para\nAuthor",
			encoderZmk:   "\"\"\"\nA\u00a0line\\\n\u00a0\u00a0another\u00a0line\\\nBack\nParagraph\n\u00a0\u00a0\u00a0\u00a0Spacy\u00a0\u00a0Para\n\"\"\" Author",
		},
	},
	{
		descr: "Span Block",
		zmk: `:::
A simple
   span
and much more
:::`,
		expect: expectMap{
			encoderHTML:  "<div><p>A simple  span and much more</p></div>",
			encoderMD:    "",
			encoderSz:    `(BLOCK (REGION-BLOCK () ((PARA (TEXT "A") (SPACE) (TEXT "simple") (SOFT) (SPACE) (TEXT "span") (SOFT) (TEXT "and") (SPACE) (TEXT "much") (SPACE) (TEXT "more")))))`,
			encoderSHTML: `((div (p "A" " " "simple" " " " " "span" " " "and" " " "much" " " "more")))`,
			encoderText:  `A simple  span and much more`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Simple Verbatim Code",
		zmk:   "```\nHello\nWorld\n```",
Paragraph

    Spacy  Para
""" Author`,
		expect: expectMap{
			encoderHTML:  "<div><p>A\u00a0line<br>\u00a0\u00a0another\u00a0line<br>Back</p><p>Paragraph</p><p>\u00a0\u00a0\u00a0\u00a0Spacy\u00a0\u00a0Para</p><cite>Author</cite></div>",
			encoderMD:    "",
			encoderSz:    "(BLOCK (REGION-VERSE () ((PARA (TEXT \"A\u00a0line\") (HARD) (TEXT \"\u00a0\u00a0another\u00a0line\") (HARD) (TEXT \"Back\")) (PARA (TEXT \"Paragraph\")) (PARA (TEXT \"\u00a0\u00a0\u00a0\u00a0Spacy\u00a0\u00a0Para\"))) (TEXT \"Author\")))",
			encoderSHTML: "((div (p \"A\u00a0line\" (br) \"\u00a0\u00a0another\u00a0line\" (br) \"Back\") (p \"Paragraph\") (p \"\u00a0\u00a0\u00a0\u00a0Spacy\u00a0\u00a0Para\") (cite \"Author\")))",
			encoderText:  "A line\n another line\nBack\nParagraph\n Spacy Para\nAuthor",
			encoderZmk:   "\"\"\"\nA\u00a0line\\\n\u00a0\u00a0another\u00a0line\\\nBack\nParagraph\n\u00a0\u00a0\u00a0\u00a0Spacy\u00a0\u00a0Para\n\"\"\" Author",
		},
	},
	{
		descr: "Span Block",
		zmk: `:::
A simple
   span
and much more
:::`,
		expect: expectMap{
			encoderHTML:  "<div><p>A simple    span and much more</p></div>",
			encoderMD:    "",
			encoderSz:    `(BLOCK (REGION-BLOCK () ((PARA (TEXT "A simple") (SOFT) (TEXT "   span") (SOFT) (TEXT "and much more")))))`,
			encoderSHTML: `((div (p "A simple" " " "   span" " " "and much more")))`,
			encoderText:  `A simple  span and much more`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Simple Verbatim Code",
		zmk:   "```\nHello\nWorld\n```",
	},
	{
		descr: "Simple Description List",
		zmk:   "; Zettel\n: Paper\n: Note\n; Zettelkasten\n: Slip box",
		expect: expectMap{
			encoderHTML:  "<dl><dt>Zettel</dt><dd><p>Paper</p></dd><dd><p>Note</p></dd><dt>Zettelkasten</dt><dd><p>Slip box</p></dd></dl>",
			encoderMD:    "",
			encoderSz:    `(BLOCK (DESCRIPTION ((TEXT "Zettel")) (BLOCK (BLOCK (PARA (TEXT "Paper"))) (BLOCK (PARA (TEXT "Note")))) ((TEXT "Zettelkasten")) (BLOCK (BLOCK (PARA (TEXT "Slip") (SPACE) (TEXT "box"))))))`,
			encoderSHTML: `((dl (dt "Zettel") (dd (p "Paper")) (dd (p "Note")) (dt "Zettelkasten") (dd (p "Slip" " " "box"))))`,
			encoderText:  "Zettel\nPaper\nNote\nZettelkasten\nSlip box",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Description List with paragraphs as item",
		zmk:   "; Zettel\n: Paper\n\n  Note\n; Zettelkasten\n: Slip box",
		expect: expectMap{
			encoderHTML:  "<dl><dt>Zettel</dt><dd><p>Paper</p><p>Note</p></dd><dt>Zettelkasten</dt><dd><p>Slip box</p></dd></dl>",
			encoderMD:    "",
			encoderSz:    `(BLOCK (DESCRIPTION ((TEXT "Zettel")) (BLOCK (BLOCK (PARA (TEXT "Paper")) (PARA (TEXT "Note")))) ((TEXT "Zettelkasten")) (BLOCK (BLOCK (PARA (TEXT "Slip") (SPACE) (TEXT "box"))))))`,
			encoderSHTML: `((dl (dt "Zettel") (dd (p "Paper") (p "Note")) (dt "Zettelkasten") (dd (p "Slip" " " "box"))))`,
			encoderText:  "Zettel\nPaper\nNote\nZettelkasten\nSlip box",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Description List with keys, but no descriptions",
		zmk:   "; K1\n: D11\n: D12\n; K2\n; K3\n: D31",
	},
	{
		descr: "Simple Description List",
		zmk:   "; Zettel\n: Paper\n: Note\n; Zettelkasten\n: Slip box",
		expect: expectMap{
			encoderHTML:  "<dl><dt>Zettel</dt><dd><p>Paper</p></dd><dd><p>Note</p></dd><dt>Zettelkasten</dt><dd><p>Slip box</p></dd></dl>",
			encoderMD:    "",
			encoderSz:    `(BLOCK (DESCRIPTION ((TEXT "Zettel")) (BLOCK (BLOCK (PARA (TEXT "Paper"))) (BLOCK (PARA (TEXT "Note")))) ((TEXT "Zettelkasten")) (BLOCK (BLOCK (PARA (TEXT "Slip box"))))))`,
			encoderSHTML: `((dl (dt "Zettel") (dd (p "Paper")) (dd (p "Note")) (dt "Zettelkasten") (dd (p "Slip box"))))`,
			encoderText:  "Zettel\nPaper\nNote\nZettelkasten\nSlip box",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Description List with paragraphs as item",
		zmk:   "; Zettel\n: Paper\n\n  Note\n; Zettelkasten\n: Slip box",
		expect: expectMap{
			encoderHTML:  "<dl><dt>Zettel</dt><dd><p>Paper</p><p>Note</p></dd><dt>Zettelkasten</dt><dd><p>Slip box</p></dd></dl>",
			encoderMD:    "",
			encoderSz:    `(BLOCK (DESCRIPTION ((TEXT "Zettel")) (BLOCK (BLOCK (PARA (TEXT "Paper")) (PARA (TEXT "Note")))) ((TEXT "Zettelkasten")) (BLOCK (BLOCK (PARA (TEXT "Slip box"))))))`,
			encoderSHTML: `((dl (dt "Zettel") (dd (p "Paper") (p "Note")) (dt "Zettelkasten") (dd (p "Slip box"))))`,
			encoderText:  "Zettel\nPaper\nNote\nZettelkasten\nSlip box",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Description List with keys, but no descriptions",
		zmk:   "; K1\n: D11\n: D12\n; K2\n; K3\n: D31",

Changes to encoder/encoder_inline_test.go.

	},
	{
		descr: "Simple text: Hello, world (inline)",
		zmk:   `Hello, world`,
		expect: expectMap{
			encoderHTML:  "Hello, world",
			encoderMD:    "Hello, world",
			encoderSz:    `(INLINE (TEXT "Hello,") (SPACE) (TEXT "world"))`,
			encoderSHTML: `("Hello," " " "world")`,
			encoderText:  "Hello, world",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Soft Break",
		zmk:   "soft\nbreak",
	},
	{
		descr: "Simple text: Hello, world (inline)",
		zmk:   `Hello, world`,
		expect: expectMap{
			encoderHTML:  "Hello, world",
			encoderMD:    "Hello, world",
			encoderSz:    `(INLINE (TEXT "Hello, world"))`,
			encoderSHTML: `("Hello, world")`,
			encoderText:  "Hello, world",
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Soft Break",
		zmk:   "soft\nbreak",
	},
	{
		descr: "Nested quotes (default)",
		zmk:   `""say: ::""yes, ::""or?""::""::""`,
		expect: expectMap{
			encoderHTML:  `&ldquo;say: <span>&lsquo;yes, <span>&ldquo;or?&rdquo;</span>&rsquo;</span>&rdquo;`,
			encoderMD:    "<q>say: <q>yes, <q>or?</q></q></q>",
			encoderSz:    `(INLINE (FORMAT-QUOTE () (TEXT "say:") (SPACE) (FORMAT-SPAN () (FORMAT-QUOTE () (TEXT "yes,") (SPACE) (FORMAT-SPAN () (FORMAT-QUOTE () (TEXT "or?")))))))`,
			encoderSHTML: `((@L (@H "&ldquo;") "say:" " " (span (@L (@H "&lsquo;") "yes," " " (span (@L (@H "&ldquo;") "or?" (@H "&rdquo;"))) (@H "&rsquo;"))) (@H "&rdquo;")))`,
			encoderText:  `say: yes, or?`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Two quotes",
		zmk:   `""yes"" or ""no""`,
		expect: expectMap{
			encoderHTML:  `&ldquo;yes&rdquo; or &ldquo;no&rdquo;`,
			encoderMD:    "<q>yes</q> or <q>no</q>",
			encoderSz:    `(INLINE (FORMAT-QUOTE () (TEXT "yes")) (SPACE) (TEXT "or") (SPACE) (FORMAT-QUOTE () (TEXT "no")))`,
			encoderSHTML: `((@L (@H "&ldquo;") "yes" (@H "&rdquo;")) " " "or" " " (@L (@H "&ldquo;") "no" (@H "&rdquo;")))`,
			encoderText:  `yes or no`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Mark formatting",
		zmk:   `##marked##`,
	},
	{
		descr: "Nested quotes (default)",
		zmk:   `""say: ::""yes, ::""or?""::""::""`,
		expect: expectMap{
			encoderHTML:  `&ldquo;say: <span>&lsquo;yes, <span>&ldquo;or?&rdquo;</span>&rsquo;</span>&rdquo;`,
			encoderMD:    "<q>say: <q>yes, <q>or?</q></q></q>",
			encoderSz:    `(INLINE (FORMAT-QUOTE () (TEXT "say: ") (FORMAT-SPAN () (FORMAT-QUOTE () (TEXT "yes, ") (FORMAT-SPAN () (FORMAT-QUOTE () (TEXT "or?")))))))`,
			encoderSHTML: `((@L (@H "&ldquo;") "say: " (span (@L (@H "&lsquo;") "yes, " (span (@L (@H "&ldquo;") "or?" (@H "&rdquo;"))) (@H "&rsquo;"))) (@H "&rdquo;")))`,
			encoderText:  `say: yes, or?`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Two quotes",
		zmk:   `""yes"" or ""no""`,
		expect: expectMap{
			encoderHTML:  `&ldquo;yes&rdquo; or &ldquo;no&rdquo;`,
			encoderMD:    "<q>yes</q> or <q>no</q>",
			encoderSz:    `(INLINE (FORMAT-QUOTE () (TEXT "yes")) (TEXT " or ") (FORMAT-QUOTE () (TEXT "no")))`,
			encoderSHTML: `((@L (@H "&ldquo;") "yes" (@H "&rdquo;")) " or " (@L (@H "&ldquo;") "no" (@H "&rdquo;")))`,
			encoderText:  `yes or no`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Mark formatting",
		zmk:   `##marked##`,
	},
	{
		descr: "HTML in Code formatting",
		zmk:   "``<script `` abc",
		expect: expectMap{
			encoderHTML:  "<code>&lt;script </code> abc",
			encoderMD:    "`<script ` abc",
			encoderSz:    `(INLINE (LITERAL-CODE () "<script ") (SPACE) (TEXT "abc"))`,
			encoderSHTML: `((code "<script ") " " "abc")`,
			encoderText:  `<script  abc`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Input formatting",
		zmk:   `''input''`,
	},
	{
		descr: "HTML in Code formatting",
		zmk:   "``<script `` abc",
		expect: expectMap{
			encoderHTML:  "<code>&lt;script </code> abc",
			encoderMD:    "`<script ` abc",
			encoderSz:    `(INLINE (LITERAL-CODE () "<script ") (TEXT " abc"))`,
			encoderSHTML: `((code "<script ") " abc")`,
			encoderText:  `<script  abc`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Input formatting",
		zmk:   `''input''`,
		},
	}, {
		descr: "No comment",
		zmk:   `% comment`,
		expect: expectMap{
			encoderHTML:  `% comment`,
			encoderMD:    "% comment",
			encoderSz:    `(INLINE (TEXT "%") (SPACE) (TEXT "comment"))`,
			encoderSHTML: `("%" " " "comment")`,
			encoderText:  `% comment`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Line comment (nogen HTML)",
		zmk:   `%% line comment`,
		},
	}, {
		descr: "No comment",
		zmk:   `% comment`,
		expect: expectMap{
			encoderHTML:  `% comment`,
			encoderMD:    "% comment",
			encoderSz:    `(INLINE (TEXT "% comment"))`,
			encoderSHTML: `("% comment")`,
			encoderText:  `% comment`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Line comment (nogen HTML)",
		zmk:   `%% line comment`,
	},
	{
		descr: "Mark with text",
		zmk:   `[!mark|with text]`,
		expect: expectMap{
			encoderHTML:  `<a id="mark">with text</a>`,
			encoderMD:    "with text",
			encoderSz:    `(INLINE (MARK "mark" "mark" "mark" (TEXT "with") (SPACE) (TEXT "text")))`,
			encoderSHTML: `((a (@ (id . "mark")) "with" " " "text"))`,
			encoderText:  `with text`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Invalid Link",
		zmk:   `[[link|00000000000000]]`,
	},
	{
		descr: "Mark with text",
		zmk:   `[!mark|with text]`,
		expect: expectMap{
			encoderHTML:  `<a id="mark">with text</a>`,
			encoderMD:    "with text",
			encoderSz:    `(INLINE (MARK "mark" "mark" "mark" (TEXT "with text")))`,
			encoderSHTML: `((a (@ (id . "mark")) "with text"))`,
			encoderText:  `with text`,
			encoderZmk:   useZmk,
		},
	},
	{
		descr: "Invalid Link",
		zmk:   `[[link|00000000000000]]`,

Changes to encoder/mdenc/mdenc.go.

		v.visitNestedList(n)
	case *ast.DescriptionListNode:
		return nil // Should write no content
	case *ast.TableNode:
		return nil // Should write no content
	case *ast.TextNode:
		v.b.WriteString(n.Text)
	case *ast.SpaceNode:
		v.b.WriteString(n.Lexeme)
	case *ast.BreakNode:
		v.visitBreak(n)
	case *ast.LinkNode:
		v.visitLink(n)
	case *ast.EmbedRefNode:
		v.visitEmbedRef(n)
	case *ast.FootnoteNode:
		v.visitNestedList(n)
	case *ast.DescriptionListNode:
		return nil // Should write no content
	case *ast.TableNode:
		return nil // Should write no content
	case *ast.TextNode:
		v.b.WriteString(n.Text)


	case *ast.BreakNode:
		v.visitBreak(n)
	case *ast.LinkNode:
		v.visitLink(n)
	case *ast.EmbedRefNode:
		v.visitEmbedRef(n)
	case *ast.FootnoteNode:

Changes to encoder/szenc/transform.go.

		return t.getTable(n)
	case *ast.TranscludeNode:
		return sx.MakeList(sz.SymTransclude, getAttributes(n.Attrs), getReference(n.Ref))
	case *ast.BLOBNode:
		return t.getBLOB(n)
	case *ast.TextNode:
		return sx.MakeList(sz.SymText, sx.MakeString(n.Text))
	case *ast.SpaceNode:
		if t.inVerse {
			return sx.MakeList(sz.SymSpace, sx.MakeString(n.Lexeme))
		}
		return sx.MakeList(sz.SymSpace)
	case *ast.BreakNode:
		if n.Hard {
			return sx.MakeList(sz.SymHard)
		}
		return sx.MakeList(sz.SymSoft)
	case *ast.LinkNode:
		return t.getLink(n)
		return t.getTable(n)
	case *ast.TranscludeNode:
		return sx.MakeList(sz.SymTransclude, getAttributes(n.Attrs), getReference(n.Ref))
	case *ast.BLOBNode:
		return t.getBLOB(n)
	case *ast.TextNode:
		return sx.MakeList(sz.SymText, sx.MakeString(n.Text))





	case *ast.BreakNode:
		if n.Hard {
			return sx.MakeList(sz.SymHard)
		}
		return sx.MakeList(sz.SymSoft)
	case *ast.LinkNode:
		return t.getLink(n)

Changes to encoder/textenc/textenc.go.

// Package textenc encodes the abstract syntax tree into its text.
package textenc

import (
	"io"

	"t73f.de/r/zsc/api"

	"zettelstore.de/z/ast"
	"zettelstore.de/z/encoder"
	"zettelstore.de/z/zettel/meta"
)

func init() {
	encoder.Register(api.EncoderText, func(*encoder.CreateParameter) encoder.Encoder { return Create() })







>







14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
// Package textenc encodes the abstract syntax tree into its text.
package textenc

import (
	"io"

	"t73f.de/r/zsc/api"
	"t73f.de/r/zsc/input"
	"zettelstore.de/z/ast"
	"zettelstore.de/z/encoder"
	"zettelstore.de/z/zettel/meta"
)

func init() {
	encoder.Register(api.EncoderText, func(*encoder.CreateParameter) encoder.Encoder { return Create() })
		return nil
	case *ast.TableNode:
		v.visitTable(n)
		return nil
	case *ast.TranscludeNode, *ast.BLOBNode:
		return nil
	case *ast.TextNode:
		v.b.WriteString(n.Text)
		return nil
	case *ast.SpaceNode:
		v.b.WriteByte(' ')
		return nil
	case *ast.BreakNode:
		if n.Hard {
			v.b.WriteByte('\n')
		} else {
			v.b.WriteByte(' ')
		}
		return nil
	case *ast.TableNode:
		v.visitTable(n)
		return nil
	case *ast.TranscludeNode, *ast.BLOBNode:
		return nil
	case *ast.TextNode:
		v.visitText(n.Text)



		return nil
	case *ast.BreakNode:
		if n.Hard {
			v.b.WriteByte('\n')
		} else {
			v.b.WriteByte(' ')
		}
func (v *visitor) visitInlineSlice(is *ast.InlineSlice) {
	for i, in := range *is {
		v.inlinePos = i
		ast.Walk(v, in)
	}
	v.inlinePos = 0
}
func (v *visitor) writePosChar(pos int, ch byte) {
	if pos > 0 {
		v.b.WriteByte(ch)
	}
}
func (v *visitor) visitInlineSlice(is *ast.InlineSlice) {
	for i, in := range *is {
		v.inlinePos = i
		ast.Walk(v, in)
	}
	v.inlinePos = 0
}

func (v *visitor) visitText(s string) {
	spaceFound := false
	for _, ch := range s {
		if input.IsSpace(ch) {
			if !spaceFound {
				v.b.WriteByte(' ')
				spaceFound = true
			}
			continue
		}
		spaceFound = false
		v.b.WriteString(string(ch))
	}
}
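// Illustrative sketch, not part of this check-in: visitText collapses each
// run of space characters (as classified by input.IsSpace) into one blank,
// e.g. the input "A \u00a0 line" is written as "A line".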

func (v *visitor) writePosChar(pos int, ch byte) {
	if pos > 0 {
		v.b.WriteByte(ch)
	}
}

Changes to encoder/zmkenc/zmkenc.go.

	case *ast.TranscludeNode:
		v.b.WriteStrings("{{{", n.Ref.String(), "}}}")
		v.visitAttributes(n.Attrs)
	case *ast.BLOBNode:
		v.visitBLOB(n)
	case *ast.TextNode:
		v.visitText(n)
	case *ast.SpaceNode:
		v.b.WriteString(n.Lexeme)
	case *ast.BreakNode:
		v.visitBreak(n)
	case *ast.LinkNode:
		v.visitLink(n)
	case *ast.EmbedRefNode:
		v.visitEmbedRef(n)
	case *ast.EmbedBLOBNode:
	case *ast.TranscludeNode:
		v.b.WriteStrings("{{{", n.Ref.String(), "}}}")
		v.visitAttributes(n.Attrs)
	case *ast.BLOBNode:
		v.visitBLOB(n)
	case *ast.TextNode:
		v.visitText(n)


	case *ast.BreakNode:
		v.visitBreak(n)
	case *ast.LinkNode:
		v.visitLink(n)
	case *ast.EmbedRefNode:
		v.visitEmbedRef(n)
	case *ast.EmbedBLOBNode:

Changes to evaluator/evaluator.go.

		ast.Walk(fs, bn)
	}
}

func (fs *fragmentSearcher) visitInlineSlice(is *ast.InlineSlice) {
	for i, in := range *is {
		if mn, ok := in.(*ast.MarkNode); ok && mn.Fragment == fs.fragment {
			ris := skipSpaceNodes((*is)[i+1:])
			if len(mn.Inlines) > 0 {
				fs.result = append(ast.InlineSlice{}, mn.Inlines...)
				fs.result = append(fs.result, &ast.SpaceNode{Lexeme: " "})
				fs.result = append(fs.result, ris...)
			} else {
				fs.result = ris
			}
			return
		}
		ast.Walk(fs, in)
	}
}

func skipSpaceNodes(ins ast.InlineSlice) ast.InlineSlice {
	for i, in := range ins {
		switch in.(type) {
		case *ast.SpaceNode:
		case *ast.BreakNode:
		default:
			return ins[i:]
		}
	}
	return nil
}
		ast.Walk(fs, bn)
	}
}

func (fs *fragmentSearcher) visitInlineSlice(is *ast.InlineSlice) {
	for i, in := range *is {
		if mn, ok := in.(*ast.MarkNode); ok && mn.Fragment == fs.fragment {
			ris := skipBreakeNodes((*is)[i+1:])
			if len(mn.Inlines) > 0 {
				fs.result = append(ast.InlineSlice{}, mn.Inlines...)
				fs.result = append(fs.result, &ast.TextNode{Text: " "})
				fs.result = append(fs.result, ris...)
			} else {
				fs.result = ris
			}
			return
		}
		ast.Walk(fs, in)
	}
}

func skipBreakeNodes(ins ast.InlineSlice) ast.InlineSlice {
	for i, in := range ins {
		switch in.(type) {
		case *ast.BreakNode:
		default:
			return ins[i:]
		}
	}
	return nil
}

Changes to evaluator/list.go.

	ccs = ap.limitTags(ccs)
	countMap := ap.calcFontSizes(ccs)

	para := make(ast.InlineSlice, 0, len(ccs))
	ccs.SortByName()
	for i, cat := range ccs {
		if i > 0 {
			para = append(para, &ast.SpaceNode{Lexeme: " "})
		}
		buf.WriteString(cat.Name)
		para = append(para,
			&ast.LinkNode{
				Attrs: countMap[cat.Count],
				Ref:   ast.ParseReference(buf.String()),
				Inlines: ast.InlineSlice{
	ccs = ap.limitTags(ccs)
	countMap := ap.calcFontSizes(ccs)

	para := make(ast.InlineSlice, 0, len(ccs))
	ccs.SortByName()
	for i, cat := range ccs {
		if i > 0 {
			para = append(para, &ast.TextNode{Text: " "})
		}
		buf.WriteString(cat.Name)
		para = append(para,
			&ast.LinkNode{
				Attrs: countMap[cat.Count],
				Ref:   ast.ParseReference(buf.String()),
				Inlines: ast.InlineSlice{

		items = append(items, ast.ItemSlice{ast.CreateParaNode(
			&ast.LinkNode{
				Attrs:   nil,
				Ref:     ast.ParseReference(q1),
				Inlines: ast.InlineSlice{&ast.TextNode{Text: cat.Name}},
			},
			&ast.SpaceNode{Lexeme: " "},
			&ast.TextNode{Text: "(" + strconv.Itoa(cat.Count) + ", "},
			&ast.LinkNode{
				Attrs:   nil,
				Ref:     ast.ParseReference(q2),
				Inlines: ast.InlineSlice{&ast.TextNode{Text: "values"}},
			},
			&ast.TextNode{Text: ")"},

		items = append(items, ast.ItemSlice{ast.CreateParaNode(
			&ast.LinkNode{
				Attrs:   nil,
				Ref:     ast.ParseReference(q1),
				Inlines: ast.InlineSlice{&ast.TextNode{Text: cat.Name}},
			},
			&ast.TextNode{Text: " "},
			&ast.TextNode{Text: "(" + strconv.Itoa(cat.Count) + ", "},
			&ast.LinkNode{
				Attrs:   nil,
				Ref:     ast.ParseReference(q2),
				Inlines: ast.InlineSlice{&ast.TextNode{Text: "values"}},
			},
			&ast.TextNode{Text: ")"},

Changes to evaluator/metadata.go.

		sliceData = []string{value}
	}
	makeLink := dt == meta.TypeID || dt == meta.TypeIDSet

	result := make(ast.InlineSlice, 0, 2*len(sliceData)-1)
	for i, val := range sliceData {
		if i > 0 {
			result = append(result, &ast.SpaceNode{Lexeme: " "})
		}
		tn := &ast.TextNode{Text: val}
		if makeLink {
			result = append(result, &ast.LinkNode{
				Ref:     ast.ParseReference(val),
				Inlines: ast.InlineSlice{tn},
			})
		sliceData = []string{value}
	}
	makeLink := dt == meta.TypeID || dt == meta.TypeIDSet

	result := make(ast.InlineSlice, 0, 2*len(sliceData)-1)
	for i, val := range sliceData {
		if i > 0 {
			result = append(result, &ast.TextNode{Text: " "})
		}
		tn := &ast.TextNode{Text: val}
		if makeLink {
			result = append(result, &ast.LinkNode{
				Ref:     ast.ParseReference(val),
				Inlines: ast.InlineSlice{tn},
			})
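
A minimal sketch of the pattern used in the hunk above: metadata values are now separated by a plain TextNode holding a single space (the former SpaceNode is gone), and identifier-typed values are wrapped in a LinkNode. The helper and parameter names are illustrative, not part of the package.

package example

import "zettelstore.de/z/ast"

// joinValues mirrors the loop above: values separated by a TextNode " ",
// optionally wrapped in a LinkNode.
func joinValues(values []string, makeLink bool) ast.InlineSlice {
	if len(values) == 0 {
		return nil
	}
	result := make(ast.InlineSlice, 0, 2*len(values)-1)
	for i, val := range values {
		if i > 0 {
			result = append(result, &ast.TextNode{Text: " "})
		}
		tn := &ast.TextNode{Text: val}
		if makeLink {
			result = append(result, &ast.LinkNode{
				Ref:     ast.ParseReference(val),
				Inlines: ast.InlineSlice{tn},
			})
			continue
		}
		result = append(result, tn)
	}
	return result
}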

Changes to go.mod.

Old (lines 1-19):
module zettelstore.de/z

go 1.22

require (
	github.com/fsnotify/fsnotify v1.7.0
	github.com/yuin/goldmark v1.7.3
	golang.org/x/crypto v0.24.0
	golang.org/x/term v0.21.0
	golang.org/x/text v0.16.0
	t73f.de/r/sx v0.0.0-20240513163553-ec4fcc6539ca
	t73f.de/r/sxwebs v0.0.0-20240613142113-66fc5a284245
	t73f.de/r/zsc v0.0.0-20240531154807-955147c904a3
)

require (
	golang.org/x/sys v0.21.0 // indirect
	t73f.de/r/webs v0.0.0-20240607145146-ac6799458514 // indirect
)












New (lines 1-19):
module zettelstore.de/z

go 1.22

require (
	github.com/fsnotify/fsnotify v1.7.0
	github.com/yuin/goldmark v1.7.3
	golang.org/x/crypto v0.24.0
	golang.org/x/term v0.21.0
	golang.org/x/text v0.16.0
	t73f.de/r/sx v0.0.0-20240513163553-ec4fcc6539ca
	t73f.de/r/sxwebs v0.0.0-20240613142113-66fc5a284245
	t73f.de/r/zsc v0.0.0-20240620163129-e0d62ad54c46
)

require (
	golang.org/x/sys v0.21.0 // indirect
	t73f.de/r/webs v0.0.0-20240617100047-8730e9917915 // indirect
)

Changes to go.sum.

Old (lines 10-20):
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
t73f.de/r/sx v0.0.0-20240513163553-ec4fcc6539ca h1:vvDqiuUfBLf+t/gpiSyqIFAdvZ7FLigOH38bqMY+v8k=
t73f.de/r/sx v0.0.0-20240513163553-ec4fcc6539ca/go.mod h1:G9pD1j2R6y9ZkPBb81mSnmwaAvTOg7r6jKp/OF7WeFA=
t73f.de/r/sxwebs v0.0.0-20240613142113-66fc5a284245 h1:raE7KUgoGsp2DzXOko9dDXEsSJ/VvoXCDYeICx7i6uo=
t73f.de/r/sxwebs v0.0.0-20240613142113-66fc5a284245/go.mod h1:ErPBVUyE2fOktL/8M7lp/PR93wP/o9RawMajB1uSqj8=
t73f.de/r/webs v0.0.0-20240607145146-ac6799458514 h1:sXTOGkpdB0FT3cphsmj65ZAiX/SntlC0GzLif1EipMY=
t73f.de/r/webs v0.0.0-20240607145146-ac6799458514/go.mod h1:UGAAtul0TK5ACeZ6zTS3SX6GqwMFXxlUpHiV8oqNq5w=
t73f.de/r/zsc v0.0.0-20240531154807-955147c904a3 h1:iY1X8gmZ/YNsFysPQAGVpTOPknI7hxbGhWKxdDgCvr8=
t73f.de/r/zsc v0.0.0-20240531154807-955147c904a3/go.mod h1:YdsjqbI1th0bJoMclgbNycJUVr8ovAV02ZgA91uOhkU=







New (lines 10-20):
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
t73f.de/r/sx v0.0.0-20240513163553-ec4fcc6539ca h1:vvDqiuUfBLf+t/gpiSyqIFAdvZ7FLigOH38bqMY+v8k=
t73f.de/r/sx v0.0.0-20240513163553-ec4fcc6539ca/go.mod h1:G9pD1j2R6y9ZkPBb81mSnmwaAvTOg7r6jKp/OF7WeFA=
t73f.de/r/sxwebs v0.0.0-20240613142113-66fc5a284245 h1:raE7KUgoGsp2DzXOko9dDXEsSJ/VvoXCDYeICx7i6uo=
t73f.de/r/sxwebs v0.0.0-20240613142113-66fc5a284245/go.mod h1:ErPBVUyE2fOktL/8M7lp/PR93wP/o9RawMajB1uSqj8=
t73f.de/r/webs v0.0.0-20240617100047-8730e9917915 h1:rwUaPBIH3shrUIkmw51f4RyCplsCU+ISZHailsLiHTE=
t73f.de/r/webs v0.0.0-20240617100047-8730e9917915/go.mod h1:UGAAtul0TK5ACeZ6zTS3SX6GqwMFXxlUpHiV8oqNq5w=
t73f.de/r/zsc v0.0.0-20240620163129-e0d62ad54c46 h1:/CW3S5fWbZnZsJJizzXi0tfWAQdGNSqCCDEMaGTiajk=
t73f.de/r/zsc v0.0.0-20240620163129-e0d62ad54c46/go.mod h1:FH9nouOzCHoR0Nbk6gBK31gGJqQI8dGVXoyGI45yHkM=

Changes to parser/draw/draw.go.

Old (lines 124-136):
			return n
		}
	}
	return defVal
}

func canvasErrMsg(err error) ast.InlineSlice {
	return ast.CreateInlineSliceFromWords("Error:", err.Error())
}

func noSVGErrMsg() ast.InlineSlice {
	return ast.CreateInlineSliceFromWords("NO", "IMAGE")
}







New (lines 124-136):
			return n
		}
	}
	return defVal
}

func canvasErrMsg(err error) ast.InlineSlice {
	return ast.InlineSlice{&ast.TextNode{Text: "Error: " + err.Error()}}
}

func noSVGErrMsg() ast.InlineSlice {
	return ast.InlineSlice{&ast.TextNode{Text: "NO IMAGE"}}
}

Changes to parser/markdown/markdown.go.

Old (lines 269-333):
		return p.acceptRawHTML(n)
	}
	panic(fmt.Sprintf("Unhandled inline node %v", node.Kind()))
}

func (p *mdP) acceptText(node *gmAst.Text) ast.InlineSlice {
	segment := node.Segment
	if node.IsRaw() {
		return splitText(string(segment.Value(p.source)))
	}
	ins := splitText(string(segment.Value(p.source)))
	result := make(ast.InlineSlice, 0, len(ins)+1)
	for _, in := range ins {
		if tn, ok := in.(*ast.TextNode); ok {
			tn.Text = cleanText([]byte(tn.Text), true)
		}
		result = append(result, in)
	}
	if node.HardLineBreak() {
		result = append(result, &ast.BreakNode{Hard: true})
	} else if node.SoftLineBreak() {
		result = append(result, &ast.BreakNode{Hard: false})
	}
	return result
}

// splitText transform the text into a sequence of TextNode and SpaceNode
func splitText(text string) ast.InlineSlice {
	if text == "" {
		return nil
	}
	result := make(ast.InlineSlice, 0, 1)

	state := 0 // 0=unknown,1=non-spaces,2=spaces
	lastPos := 0
	for pos, ch := range text {
		if input.IsSpace(ch) {
			if state == 1 {
				result = append(result, &ast.TextNode{Text: text[lastPos:pos]})
				lastPos = pos
			}
			state = 2
		} else {
			if state == 2 {
				result = append(result, &ast.SpaceNode{Lexeme: text[lastPos:pos]})
				lastPos = pos
			}
			state = 1
		}
	}
	switch state {
	case 1:
		result = append(result, &ast.TextNode{Text: text[lastPos:]})
	case 2:
		result = append(result, &ast.SpaceNode{Lexeme: text[lastPos:]})
	default:
		panic(fmt.Sprintf("Unexpected state %v", state))
	}
	return result
}

var ignoreAfterBS = map[byte]struct{}{
	'!': {}, '"': {}, '#': {}, '$': {}, '%': {}, '&': {}, '\'': {}, '(': {},
	')': {}, '*': {}, '+': {}, ',': {}, '-': {}, '.': {}, '/': {}, ':': {},
	';': {}, '<': {}, '=': {}, '>': {}, '?': {}, '@': {}, '[': {}, '\\': {},







New (lines 269-297):
		return p.acceptRawHTML(n)
	}
	panic(fmt.Sprintf("Unhandled inline node %v", node.Kind()))
}

func (p *mdP) acceptText(node *gmAst.Text) ast.InlineSlice {
	segment := node.Segment
	text := segment.Value(p.source)
	if text == nil {
		return nil
	}
	if node.IsRaw() {
		return ast.InlineSlice{&ast.TextNode{Text: string(text)}}
	}

	result := make(ast.InlineSlice, 0, 2)

	in := &ast.TextNode{Text: cleanText(text, true)}


	result = append(result, in)

	if node.HardLineBreak() {
		result = append(result, &ast.BreakNode{Hard: true})
	} else if node.SoftLineBreak() {
		result = append(result, &ast.BreakNode{Hard: false})
	}
	return result
}
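
// Sketch of the shape acceptText now produces (see above): one TextNode for
// the cleaned segment, followed by at most one BreakNode. Standalone
// illustration against the ast package of this repository; the helper name
// is made up and not part of the parser.
package example

import "zettelstore.de/z/ast"

func textWithBreak(text string, hard, soft bool) ast.InlineSlice {
	result := ast.InlineSlice{&ast.TextNode{Text: text}}
	switch {
	case hard:
		result = append(result, &ast.BreakNode{Hard: true})
	case soft:
		result = append(result, &ast.BreakNode{Hard: false})
	}
	return result
}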

var ignoreAfterBS = map[byte]struct{}{
	'!': {}, '"': {}, '#': {}, '$': {}, '%': {}, '&': {}, '\'': {}, '(': {},
	')': {}, '*': {}, '+': {}, ',': {}, '-': {}, '.': {}, '/': {}, ':': {},
	';': {}, '<': {}, '=': {}, '>': {}, '?': {}, '@': {}, '[': {}, '\\': {},

Deleted parser/markdown/markdown_test.go.

Old (lines 1-55):
//-----------------------------------------------------------------------------
// Copyright (c) 2020-present Detlef Stern
//
// This file is part of Zettelstore.
//
// Zettelstore is licensed under the latest version of the EUPL (European Union
// Public License). Please see file LICENSE.txt for your rights and obligations
// under this license.
//
// SPDX-License-Identifier: EUPL-1.2
// SPDX-FileCopyrightText: 2020-present Detlef Stern
//-----------------------------------------------------------------------------

package markdown

import (
	"strings"
	"testing"

	"zettelstore.de/z/ast"
)

func TestSplitText(t *testing.T) {
	t.Parallel()
	var testcases = []struct {
		text string
		exp  string
	}{
		{"", ""},
		{"abc", "Tabc"},
		{" ", "S "},
		{"abc def", "TabcS Tdef"},
		{"abc def ", "TabcS TdefS "},
		{" abc def ", "S TabcS TdefS "},
	}
	for i, tc := range testcases {
		var sb strings.Builder
		for _, in := range splitText(tc.text) {
			switch n := in.(type) {
			case *ast.TextNode:
				sb.WriteByte('T')
				sb.WriteString(n.Text)
			case *ast.SpaceNode:
				sb.WriteByte('S')
				sb.WriteString(n.Lexeme)
			default:
				sb.WriteByte('Q')
			}
		}
		got := sb.String()
		if tc.exp != got {
			t.Errorf("TC=%d, text=%q, exp=%q, got=%q", i, tc.text, tc.exp, got)
		}
	}
}
New: (file removed, no replacement)
Changes to parser/parser.go.

Old (lines 115-149):
func ParseMetadata(value string) ast.InlineSlice {
	return ParseInlines(input.NewInput([]byte(value)), meta.SyntaxZmk)
}

// ParseSpacedText returns an inline slice that consists just of test and space node.
// No Zettelmarkup parsing is done. It is typically used to transform the zettel title into an inline slice.
func ParseSpacedText(s string) ast.InlineSlice {
	return ast.CreateInlineSliceFromWords(meta.ListFromValue(s)...)
}

// NormalizedSpacedText returns the given string, but normalize multiple spaces to one space.
func NormalizedSpacedText(s string) string { return strings.Join(meta.ListFromValue(s), " ") }

// ParseDescription returns a suitable description stored in the metadata as an inline slice.
// This is done for an image in most cases.
func ParseDescription(m *meta.Meta) ast.InlineSlice {
	if m == nil {
		return nil
	}
	if descr, found := m.Get(api.KeySummary); found {
		in := ParseMetadata(descr)
		cleaner.CleanInlineLinks(&in)
		return in
	}
	if title, found := m.Get(api.KeyTitle); found {
		return ParseSpacedText(title)
	}
	return ast.CreateInlineSliceFromWords("Zettel", "without", "title:", m.Zid.String())
}

// ParseZettel parses the zettel based on the syntax.
func ParseZettel(ctx context.Context, zettel zettel.Zettel, syntax string, rtConfig config.Config) *ast.ZettelNode {
	m := zettel.Meta
	inhMeta := m
	if rtConfig != nil {







New (lines 115-149):
func ParseMetadata(value string) ast.InlineSlice {
	return ParseInlines(input.NewInput([]byte(value)), meta.SyntaxZmk)
}

// ParseSpacedText returns an inline slice that consists just of test and space node.
// No Zettelmarkup parsing is done. It is typically used to transform the zettel title into an inline slice.
func ParseSpacedText(s string) ast.InlineSlice {
	return ast.InlineSlice{&ast.TextNode{Text: strings.Join(meta.ListFromValue(s), " ")}}
}

// NormalizedSpacedText returns the given string, but normalize multiple spaces to one space.
func NormalizedSpacedText(s string) string { return strings.Join(meta.ListFromValue(s), " ") }

// ParseDescription returns a suitable description stored in the metadata as an inline slice.
// This is done for an image in most cases.
func ParseDescription(m *meta.Meta) ast.InlineSlice {
	if m == nil {
		return nil
	}
	if descr, found := m.Get(api.KeySummary); found {
		in := ParseMetadata(descr)
		cleaner.CleanInlineLinks(&in)
		return in
	}
	if title, found := m.Get(api.KeyTitle); found {
		return ParseSpacedText(title)
	}
	return ast.InlineSlice{&ast.TextNode{Text: "Zettel without title: " + m.Zid.String()}}
}
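
// Sketch of what ParseSpacedText above yields: meta.ListFromValue splits the
// value on whitespace and strings.Join glues it back with single blanks, so
// the whole title ends up in one TextNode. Illustrative helper only, not
// package code.
package example

import (
	"strings"

	"zettelstore.de/z/ast"
	"zettelstore.de/z/zettel/meta"
)

func spacedText(s string) ast.InlineSlice {
	// "A  zettel   title" becomes a single TextNode "A zettel title".
	return ast.InlineSlice{&ast.TextNode{Text: strings.Join(meta.ListFromValue(s), " ")}}
}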

// ParseZettel parses the zettel based on the syntax.
func ParseZettel(ctx context.Context, zettel zettel.Zettel, syntax string, rtConfig config.Config) *ast.ZettelNode {
	m := zettel.Meta
	inhMeta := m
	if rtConfig != nil {

Changes to parser/zettelmark/block.go.

Old (lines 110-126):
}

func startsWithSpaceSoftBreak(pn *ast.ParaNode) bool {
	ins := pn.Inlines
	if len(ins) < 2 {
		return false
	}
	_, isSpace := ins[0].(*ast.SpaceNode)
	_, isBreak := ins[1].(*ast.BreakNode)
	return isSpace && isBreak
}

func (cp *zmkP) cleanupListsAfterEOL() {
	for _, l := range cp.lists {
		if lits := len(l.Items); lits > 0 {
			l.Items[lits-1] = append(l.Items[lits-1], &nullItemNode{})
		}







New (lines 110-136):
}

func startsWithSpaceSoftBreak(pn *ast.ParaNode) bool {
	ins := pn.Inlines
	if len(ins) < 2 {
		return false
	}

	_, isBreak := ins[1].(*ast.BreakNode)
	return isBreak && isSpaceText(ins[0])
}
func isSpaceText(node ast.InlineNode) bool {
	if tn, isText := node.(*ast.TextNode); isText {
		for _, ch := range tn.Text {
			if !input.IsSpace(ch) {
				return false
			}
		}
		return true
	}
	return false
}
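
// A simplified stand-in for isSpaceText above: report whether an inline is a
// TextNode made up only of blanks and tabs. The real code delegates the
// character test to input.IsSpace; this sketch checks the two common cases
// directly and is for illustration only.
package example

import "zettelstore.de/z/ast"

func allSpace(node ast.InlineNode) bool {
	tn, isText := node.(*ast.TextNode)
	if !isText {
		return false
	}
	for _, ch := range tn.Text {
		if ch != ' ' && ch != '\t' {
			return false
		}
	}
	return true
}

// allSpace(&ast.TextNode{Text: " \t "}) == true
// allSpace(&ast.TextNode{Text: " x "})  == false
// allSpace(&ast.BreakNode{})            == false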

func (cp *zmkP) cleanupListsAfterEOL() {
	for _, l := range cp.lists {
		if lits := len(l.Items); lits > 0 {
			l.Items[lits-1] = append(l.Items[lits-1], &nullItemNode{})
		}

Changes to parser/zettelmark/inline.go.

Old (lines 47-62):
		var in ast.InlineNode
		success := false
		switch inp.Ch {
		case input.EOS:
			return nil
		case '\n', '\r':
			return cp.parseSoftBreak()
		case ' ', '\t':
			return cp.parseSpace()
		case '[':
			inp.Next()
			switch inp.Ch {
			case '[':
				in, success = cp.parseLink()
			case '@':
				in, success = cp.parseCite()







New (lines 47-60):
		var in ast.InlineNode
		success := false
		switch inp.Ch {
		case input.EOS:
			return nil
		case '\n', '\r':
			return cp.parseSoftBreak()


		case '[':
			inp.Next()
			switch inp.Ch {
			case '[':
				in, success = cp.parseLink()
			case '@':
				in, success = cp.parseCite()
Old (lines 101-115):
		return cp.parseBackslashRest()
	}
	for {
		inp.Next()
		switch inp.Ch {
		// The following case must contain all runes that occur in parseInline!
		// Plus the closing brackets ] and } and ) and the middle |
		case input.EOS, '\n', '\r', ' ', '\t', '[', ']', '{', '}', '(', ')', '|', '%', '_', '*', '>', '~', '^', ',', '"', '#', ':', '\'', '@', '`', runeModGrave, '$', '=', '\\', '-', '&':
			return &ast.TextNode{Text: string(inp.Src[pos:inp.Pos])}
		}
	}
}

func (cp *zmkP) parseBackslash() ast.InlineNode {
	inp := cp.inp







New (lines 99-113):
		return cp.parseBackslashRest()
	}
	for {
		inp.Next()
		switch inp.Ch {
		// The following case must contain all runes that occur in parseInline!
		// Plus the closing brackets ] and } and ) and the middle |
		case input.EOS, '\n', '\r', '[', ']', '{', '}', '(', ')', '|', '%', '_', '*', '>', '~', '^', ',', '"', '#', ':', '\'', '@', '`', runeModGrave, '$', '=', '\\', '-', '&':
			return &ast.TextNode{Text: string(inp.Src[pos:inp.Pos])}
		}
	}
}

func (cp *zmkP) parseBackslash() ast.InlineNode {
	inp := cp.inp
Old (lines 133-159):
		return &ast.TextNode{Text: "\u00a0"}
	}
	pos := inp.Pos
	inp.Next()
	return &ast.TextNode{Text: string(inp.Src[pos:inp.Pos])}
}

func (cp *zmkP) parseSpace() *ast.SpaceNode {
	inp := cp.inp
	pos := inp.Pos
	for {
		inp.Next()
		switch inp.Ch {
		case ' ', '\t':
		default:
			return &ast.SpaceNode{Lexeme: string(inp.Src[pos:inp.Pos])}
		}
	}
}

func (cp *zmkP) parseSoftBreak() *ast.BreakNode {
	cp.inp.EatEOL()
	return &ast.BreakNode{}
}

func (cp *zmkP) parseLink() (*ast.LinkNode, bool) {
	if ref, is, ok := cp.parseReference('[', ']'); ok {







New (lines 131-144):
		return &ast.TextNode{Text: "\u00a0"}
	}
	pos := inp.Pos
	inp.Next()
	return &ast.TextNode{Text: string(inp.Src[pos:inp.Pos])}
}

func (cp *zmkP) parseSoftBreak() *ast.BreakNode {
	cp.inp.EatEOL()
	return &ast.BreakNode{}
}

func (cp *zmkP) parseLink() (*ast.LinkNode, bool) {
	if ref, is, ok := cp.parseReference('[', ']'); ok {

Changes to parser/zettelmark/post-processor.go.

Old (lines 72-89):

func (pp *postProcessor) visitRegion(rn *ast.RegionNode) {
	oldVerse := pp.inVerse
	if rn.Kind == ast.RegionVerse {
		pp.inVerse = true
	}
	pp.visitBlockSlice(&rn.Blocks)

	if len(rn.Inlines) > 0 {
		pp.visitInlineSlice(&rn.Inlines)
	}
	pp.inVerse = oldVerse
}

func (pp *postProcessor) visitNestedList(ln *ast.NestedListNode) {
	for i, item := range ln.Items {
		ln.Items[i] = pp.processItemSlice(item)
	}
	if ln.Kind != ast.NestedListQuote {







New (lines 72-89):

func (pp *postProcessor) visitRegion(rn *ast.RegionNode) {
	oldVerse := pp.inVerse
	if rn.Kind == ast.RegionVerse {
		pp.inVerse = true
	}
	pp.visitBlockSlice(&rn.Blocks)
	pp.inVerse = oldVerse
	if len(rn.Inlines) > 0 {
		pp.visitInlineSlice(&rn.Inlines)
	}

}

func (pp *postProcessor) visitNestedList(ln *ast.NestedListNode) {
	for i, item := range ln.Items {
		ln.Items[i] = pp.processItemSlice(item)
	}
	if ln.Kind != ast.NestedListQuote {
Old (lines 364-383):
}

// processInlineSliceHead removes leading spaces and empty text.
func (pp *postProcessor) processInlineSliceHead(is *ast.InlineSlice) {
	ins := *is
	for i, in := range ins {
		switch in := in.(type) {
		case *ast.SpaceNode:
			if pp.inVerse {
				*is = ins[i:]
				return
			}
		case *ast.TextNode:
			if len(in.Text) > 0 {
				*is = ins[i:]
				return
			}
		default:
			*is = ins[i:]
			return







New (lines 364-388):
}

// processInlineSliceHead removes leading spaces and empty text.
func (pp *postProcessor) processInlineSliceHead(is *ast.InlineSlice) {
	ins := *is
	for i, in := range ins {
		switch in := in.(type) {
		case *ast.TextNode:
			if pp.inVerse && len(in.Text) > 0 {
				*is = ins[i:]
				return
			}
			for len(in.Text) > 0 {
				if ch := in.Text[0]; ch != ' ' && ch != '\t' {
					break
				}
				in.Text = in.Text[1:]
			}
			if len(in.Text) > 0 {
				*is = ins[i:]
				return
			}
		default:
			*is = ins[i:]
			return
Old (lines 406-478):
	ins := *is
	fromPos, toPos := 0, 0
	for fromPos < maxPos {
		ins[toPos] = ins[fromPos]
		fromPos++
		switch in := ins[toPos].(type) {
		case *ast.TextNode:
			fromPos = processTextNode(ins, maxPos, in, fromPos)
		case *ast.SpaceNode:
			if pp.inVerse {
				in.Lexeme = strings.Repeat("\u00a0", in.Count())
			}
			fromPos = processSpaceNode(ins, maxPos, in, toPos, fromPos)
		case *ast.BreakNode:
			if pp.inVerse {
				in.Hard = true
			}
		}
		toPos++
	}
	return toPos
}

func processTextNode(ins ast.InlineSlice, maxPos int, in *ast.TextNode, fromPos int) int {
	for fromPos < maxPos {
		if tn, ok := ins[fromPos].(*ast.TextNode); ok {
			in.Text = in.Text + tn.Text
			fromPos++
		} else {
			break
		}
	}
	return fromPos
}

func processSpaceNode(ins ast.InlineSlice, maxPos int, in *ast.SpaceNode, toPos, fromPos int) int {
	if fromPos < maxPos {
		switch nn := ins[fromPos].(type) {
		case *ast.BreakNode:
			if in.Count() > 1 {
				nn.Hard = true
				ins[toPos] = nn
				fromPos++
			}
		case *ast.LiteralNode:
			if nn.Kind == ast.LiteralComment {
				ins[toPos] = ins[fromPos]
				fromPos++
			}
		}
	}
	return fromPos
}

// processInlineSliceTail removes empty text nodes, breaks and spaces at the end.
func (*postProcessor) processInlineSliceTail(is *ast.InlineSlice, toPos int) int {
	ins := *is
	for toPos > 0 {
		switch n := ins[toPos-1].(type) {
		case *ast.TextNode:

			if len(n.Text) > 0 {
				return toPos
			}
		case *ast.BreakNode:
		case *ast.SpaceNode:
		default:
			return toPos
		}
		toPos--
		ins[toPos] = nil // Kill node to enable garbage collection
	}
	return toPos
}

















New (lines 411-482):
	ins := *is
	fromPos, toPos := 0, 0
	for fromPos < maxPos {
		ins[toPos] = ins[fromPos]
		fromPos++
		switch in := ins[toPos].(type) {
		case *ast.TextNode:
			// Merge following TextNodes
			for fromPos < maxPos {
				if tn, ok := ins[fromPos].(*ast.TextNode); ok {
					in.Text = in.Text + tn.Text
					fromPos++
				} else {
					break
				}
			}
			if in.Text == "" {
				continue
			}
			if ch := in.Text[len(in.Text)-1]; ch == ' ' && fromPos < maxPos {
				switch nn := ins[fromPos].(type) {
				case *ast.BreakNode:
					nn.Hard = true
					in.Text = removeTrailingSpaces(in.Text)
				case *ast.LiteralNode:
					if nn.Kind == ast.LiteralComment {
						in.Text = removeTrailingSpaces(in.Text)
					}
				}
			}
			if pp.inVerse {
				in.Text = strings.ReplaceAll(in.Text, " ", "\u00a0")
			}

		case *ast.BreakNode:
			if pp.inVerse {
				in.Hard = true
			}
		}
		toPos++
	}
	return toPos
}

// processInlineSliceTail removes empty text nodes, breaks and spaces at the end.
func (*postProcessor) processInlineSliceTail(is *ast.InlineSlice, toPos int) int {
	ins := *is
	for toPos > 0 {
		switch n := ins[toPos-1].(type) {
		case *ast.TextNode:
			n.Text = removeTrailingSpaces(n.Text)
			if len(n.Text) > 0 {
				return toPos
			}
		case *ast.BreakNode:

		default:
			return toPos
		}
		toPos--
		ins[toPos] = nil // Kill node to enable garbage collection
	}
	return toPos
}

func removeTrailingSpaces(s string) string {
	for len(s) > 0 {
		if ch := s[len(s)-1]; ch != ' ' && ch != '\t' {
			return s
		}
		s = s[0 : len(s)-1]
	}
	return ""
}
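
A short illustration of removeTrailingSpaces above, which now does the work the dropped SpaceNode handling used to do: trailing blanks and tabs are stripped from a TextNode, and a TextNode that ends in a blank directly before a BreakNode marks that break as hard. The helper below is a standalone copy for illustration.

package example

// trimTrailing mirrors removeTrailingSpaces above: strip blanks and tabs
// from the end of a string.
func trimTrailing(s string) string {
	for len(s) > 0 {
		if ch := s[len(s)-1]; ch != ' ' && ch != '\t' {
			return s
		}
		s = s[:len(s)-1]
	}
	return ""
}

// trimTrailing("verse line \t") == "verse line"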

Changes to parser/zettelmark/zettelmark_test.go.

Old (lines 70-99):
	})
}

func TestText(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"abcd", "(PARA abcd)"},
		{"ab cd", "(PARA ab SP cd)"},
		{"abcd ", "(PARA abcd)"},
		{" abcd", "(PARA abcd)"},
		{"\\", "(PARA \\)"},
		{"\\\n", ""},
		{"\\\ndef", "(PARA HB def)"},
		{"\\\r", ""},
		{"\\\rdef", "(PARA HB def)"},
		{"\\\r\n", ""},
		{"\\\r\ndef", "(PARA HB def)"},
		{"\\a", "(PARA a)"},
		{"\\aa", "(PARA aa)"},
		{"a\\a", "(PARA aa)"},
		{"\\+", "(PARA +)"},
		{"\\ ", "(PARA \u00a0)"},
		{"http://a, http://b", "(PARA http://a, SP http://b)"},
	})
}

func TestSpace(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{" ", ""},







New (lines 70-99):
	})
}

func TestText(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"abcd", "(PARA abcd)"},
		{"ab cd", "(PARA ab cd)"},
		{"abcd ", "(PARA abcd)"},
		{" abcd", "(PARA abcd)"},
		{"\\", "(PARA \\)"},
		{"\\\n", ""},
		{"\\\ndef", "(PARA HB def)"},
		{"\\\r", ""},
		{"\\\rdef", "(PARA HB def)"},
		{"\\\r\n", ""},
		{"\\\r\ndef", "(PARA HB def)"},
		{"\\a", "(PARA a)"},
		{"\\aa", "(PARA aa)"},
		{"a\\a", "(PARA aa)"},
		{"\\+", "(PARA +)"},
		{"\\ ", "(PARA \u00a0)"},
		{"http://a, http://b", "(PARA http://a, http://b)"},
	})
}

func TestSpace(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{" ", ""},
Old (lines 128-158):
		{"[", "(PARA [)"},
		{"[[", "(PARA [[)"},
		{"[[|", "(PARA [[|)"},
		{"[[]", "(PARA [[])"},
		{"[[|]", "(PARA [[|])"},
		{"[[]]", "(PARA [[]])"},
		{"[[|]]", "(PARA [[|]])"},
		{"[[ ]]", "(PARA [[ SP ]])"},
		{"[[\n]]", "(PARA [[ SB ]])"},
		{"[[ a]]", "(PARA (LINK a))"},
		{"[[a ]]", "(PARA [[a SP ]])"},
		{"[[a\n]]", "(PARA [[a SB ]])"},
		{"[[a]]", "(PARA (LINK a))"},
		{"[[12345678901234]]", "(PARA (LINK 12345678901234))"},
		{"[[a]", "(PARA [[a])"},
		{"[[|a]]", "(PARA [[|a]])"},
		{"[[b|]]", "(PARA [[b|]])"},
		{"[[b|a]]", "(PARA (LINK a b))"},
		{"[[b| a]]", "(PARA (LINK a b))"},
		{"[[b%c|a]]", "(PARA (LINK a b%c))"},
		{"[[b%%c|a]]", "(PARA [[b {% c|a]]})"},
		{"[[b|a]", "(PARA [[b|a])"},
		{"[[b\nc|a]]", "(PARA (LINK a b SB c))"},
		{"[[b c|a#n]]", "(PARA (LINK a#n b SP c))"},
		{"[[a]]go", "(PARA (LINK a) go)"},
		{"[[b|a]]{go}", "(PARA (LINK a b)[ATTR go])"},
		{"[[[[a]]|b]]", "(PARA [[ (LINK a) |b]])"},
		{"[[a[b]c|d]]", "(PARA (LINK d a[b]c))"},
		{"[[[b]c|d]]", "(PARA [ (LINK d b]c))"},
		{"[[a[]c|d]]", "(PARA (LINK d a[]c))"},
		{"[[a[b]|d]]", "(PARA (LINK d a[b]))"},







New (lines 128-158):
		{"[", "(PARA [)"},
		{"[[", "(PARA [[)"},
		{"[[|", "(PARA [[|)"},
		{"[[]", "(PARA [[])"},
		{"[[|]", "(PARA [[|])"},
		{"[[]]", "(PARA [[]])"},
		{"[[|]]", "(PARA [[|]])"},
		{"[[ ]]", "(PARA [[ ]])"},
		{"[[\n]]", "(PARA [[ SB ]])"},
		{"[[ a]]", "(PARA (LINK a))"},
		{"[[a ]]", "(PARA [[a ]])"},
		{"[[a\n]]", "(PARA [[a SB ]])"},
		{"[[a]]", "(PARA (LINK a))"},
		{"[[12345678901234]]", "(PARA (LINK 12345678901234))"},
		{"[[a]", "(PARA [[a])"},
		{"[[|a]]", "(PARA [[|a]])"},
		{"[[b|]]", "(PARA [[b|]])"},
		{"[[b|a]]", "(PARA (LINK a b))"},
		{"[[b| a]]", "(PARA (LINK a b))"},
		{"[[b%c|a]]", "(PARA (LINK a b%c))"},
		{"[[b%%c|a]]", "(PARA [[b {% c|a]]})"},
		{"[[b|a]", "(PARA [[b|a])"},
		{"[[b\nc|a]]", "(PARA (LINK a b SB c))"},
		{"[[b c|a#n]]", "(PARA (LINK a#n b c))"},
		{"[[a]]go", "(PARA (LINK a) go)"},
		{"[[b|a]]{go}", "(PARA (LINK a b)[ATTR go])"},
		{"[[[[a]]|b]]", "(PARA [[ (LINK a) |b]])"},
		{"[[a[b]c|d]]", "(PARA (LINK d a[b]c))"},
		{"[[[b]c|d]]", "(PARA [ (LINK d b]c))"},
		{"[[a[]c|d]]", "(PARA (LINK d a[]c))"},
		{"[[a[b]|d]]", "(PARA (LINK d a[b]))"},
Old (lines 178-192):

func TestCite(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"[@", "(PARA [@)"},
		{"[@]", "(PARA [@])"},
		{"[@a]", "(PARA (CITE a))"},
		{"[@ a]", "(PARA [@ SP a])"},
		{"[@a ]", "(PARA (CITE a))"},
		{"[@a\n]", "(PARA (CITE a))"},
		{"[@a\nx]", "(PARA (CITE a SB x))"},
		{"[@a\n\n]", "(PARA [@a)(PARA ])"},
		{"[@a,\n]", "(PARA (CITE a))"},
		{"[@a,n]", "(PARA (CITE a n))"},
		{"[@a| n]", "(PARA (CITE a n))"},







New (lines 178-192):

func TestCite(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"[@", "(PARA [@)"},
		{"[@]", "(PARA [@])"},
		{"[@a]", "(PARA (CITE a))"},
		{"[@ a]", "(PARA [@ a])"},
		{"[@a ]", "(PARA (CITE a))"},
		{"[@a\n]", "(PARA (CITE a))"},
		{"[@a\nx]", "(PARA (CITE a SB x))"},
		{"[@a\n\n]", "(PARA [@a)(PARA ])"},
		{"[@a,\n]", "(PARA (CITE a))"},
		{"[@a,n]", "(PARA (CITE a n))"},
		{"[@a| n]", "(PARA (CITE a n))"},
Old (lines 216-244):
		{"{", "(PARA {)"},
		{"{{", "(PARA {{)"},
		{"{{|", "(PARA {{|)"},
		{"{{}", "(PARA {{})"},
		{"{{|}", "(PARA {{|})"},
		{"{{}}", "(PARA {{}})"},
		{"{{|}}", "(PARA {{|}})"},
		{"{{ }}", "(PARA {{ SP }})"},
		{"{{\n}}", "(PARA {{ SB }})"},
		{"{{a }}", "(PARA {{a SP }})"},
		{"{{a\n}}", "(PARA {{a SB }})"},
		{"{{a}}", "(PARA (EMBED a))"},
		{"{{12345678901234}}", "(PARA (EMBED 12345678901234))"},
		{"{{ a}}", "(PARA (EMBED a))"},
		{"{{a}", "(PARA {{a})"},
		{"{{|a}}", "(PARA {{|a}})"},
		{"{{b|}}", "(PARA {{b|}})"},
		{"{{b|a}}", "(PARA (EMBED a b))"},
		{"{{b| a}}", "(PARA (EMBED a b))"},
		{"{{b|a}", "(PARA {{b|a})"},
		{"{{b\nc|a}}", "(PARA (EMBED a b SB c))"},
		{"{{b c|a#n}}", "(PARA (EMBED a#n b SP c))"},
		{"{{a}}{go}", "(PARA (EMBED a)[ATTR go])"},
		{"{{{{a}}|b}}", "(PARA {{ (EMBED a) |b}})"},
		{"{{\\|}}", "(PARA (EMBED %5C%7C))"},
		{"{{\\||a}}", "(PARA (EMBED a |))"},
		{"{{b\\||a}}", "(PARA (EMBED a b|))"},
		{"{{b\\|c|a}}", "(PARA (EMBED a b|c))"},
		{"{{\\}}}", "(PARA (EMBED %5C%7D))"},







New (lines 216-244):
		{"{", "(PARA {)"},
		{"{{", "(PARA {{)"},
		{"{{|", "(PARA {{|)"},
		{"{{}", "(PARA {{})"},
		{"{{|}", "(PARA {{|})"},
		{"{{}}", "(PARA {{}})"},
		{"{{|}}", "(PARA {{|}})"},
		{"{{ }}", "(PARA {{ }})"},
		{"{{\n}}", "(PARA {{ SB }})"},
		{"{{a }}", "(PARA {{a }})"},
		{"{{a\n}}", "(PARA {{a SB }})"},
		{"{{a}}", "(PARA (EMBED a))"},
		{"{{12345678901234}}", "(PARA (EMBED 12345678901234))"},
		{"{{ a}}", "(PARA (EMBED a))"},
		{"{{a}", "(PARA {{a})"},
		{"{{|a}}", "(PARA {{|a}})"},
		{"{{b|}}", "(PARA {{b|}})"},
		{"{{b|a}}", "(PARA (EMBED a b))"},
		{"{{b| a}}", "(PARA (EMBED a b))"},
		{"{{b|a}", "(PARA {{b|a})"},
		{"{{b\nc|a}}", "(PARA (EMBED a b SB c))"},
		{"{{b c|a#n}}", "(PARA (EMBED a#n b c))"},
		{"{{a}}{go}", "(PARA (EMBED a)[ATTR go])"},
		{"{{{{a}}|b}}", "(PARA {{ (EMBED a) |b}})"},
		{"{{\\|}}", "(PARA (EMBED %5C%7C))"},
		{"{{\\||a}}", "(PARA (EMBED a |))"},
		{"{{b\\||a}}", "(PARA (EMBED a b|))"},
		{"{{b\\|c|a}}", "(PARA (EMBED a b|c))"},
		{"{{\\}}}", "(PARA (EMBED %5C%7D))"},
Old (lines 254-278):
func TestMark(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"[!", "(PARA [!)"},
		{"[!\n", "(PARA [!)"},
		{"[!]", "(PARA (MARK #*))"},
		{"[!][!]", "(PARA (MARK #*) (MARK #*-1))"},
		{"[! ]", "(PARA [! SP ])"},
		{"[!a]", "(PARA (MARK \"a\" #a))"},
		{"[!a][!a]", "(PARA (MARK \"a\" #a) (MARK \"a\" #a-1))"},
		{"[!a ]", "(PARA [!a SP ])"},
		{"[!a_]", "(PARA (MARK \"a_\" #a))"},
		{"[!a_][!a]", "(PARA (MARK \"a_\" #a) (MARK \"a\" #a-1))"},
		{"[!a-b]", "(PARA (MARK \"a-b\" #a-b))"},
		{"[!a|b]", "(PARA (MARK \"a\" #a b))"},
		{"[!a|]", "(PARA (MARK \"a\" #a))"},
		{"[!|b]", "(PARA (MARK #* b))"},
		{"[!|b c]", "(PARA (MARK #* b SP c))"},
	})
}

func TestComment(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"%", "(PARA %)"},







New (lines 254-278):
func TestMark(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"[!", "(PARA [!)"},
		{"[!\n", "(PARA [!)"},
		{"[!]", "(PARA (MARK #*))"},
		{"[!][!]", "(PARA (MARK #*) (MARK #*-1))"},
		{"[! ]", "(PARA [! ])"},
		{"[!a]", "(PARA (MARK \"a\" #a))"},
		{"[!a][!a]", "(PARA (MARK \"a\" #a) (MARK \"a\" #a-1))"},
		{"[!a ]", "(PARA [!a ])"},
		{"[!a_]", "(PARA (MARK \"a_\" #a))"},
		{"[!a_][!a]", "(PARA (MARK \"a_\" #a) (MARK \"a\" #a-1))"},
		{"[!a-b]", "(PARA (MARK \"a-b\" #a-b))"},
		{"[!a|b]", "(PARA (MARK \"a\" #a b))"},
		{"[!a|]", "(PARA (MARK \"a\" #a))"},
		{"[!|b]", "(PARA (MARK #* b))"},
		{"[!|b c]", "(PARA (MARK #* b c))"},
	})
}

func TestComment(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"%", "(PARA %)"},
Old (lines 413-427):
		// Good cases
		{"&lt;", "(PARA <)"},
		{"&#48;", "(PARA 0)"},
		{"&#x4A;", "(PARA J)"},
		{"&#X4a;", "(PARA J)"},
		{"&hellip;", "(PARA \u2026)"},
		{"&nbsp;", "(PARA \u00a0)"},
		{"E: &amp;,&#63;;&#x63;.", "(PARA E: SP &,?;c.)"},
	})
}

func TestVerbatimZettel(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"@@@\n@@@", "(ZETTEL)"},







New (lines 413-427):
		// Good cases
		{"&lt;", "(PARA <)"},
		{"&#48;", "(PARA 0)"},
		{"&#x4A;", "(PARA J)"},
		{"&#X4a;", "(PARA J)"},
		{"&hellip;", "(PARA \u2026)"},
		{"&nbsp;", "(PARA \u00a0)"},
		{"E: &amp;,&#63;;&#x63;.", "(PARA E: &,?;c.)"},
	})
}

func TestVerbatimZettel(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"@@@\n@@@", "(ZETTEL)"},
Old (lines 523-559):
	}))
}

func TestHeading(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"=h", "(PARA =h)"},
		{"= h", "(PARA = SP h)"},
		{"==h", "(PARA ==h)"},
		{"== h", "(PARA == SP h)"},
		{"===h", "(PARA ===h)"},
		{"=== h", "(H1 h #h)"},
		{"===  h", "(H1 h #h)"},
		{"==== h", "(H2 h #h)"},
		{"===== h", "(H3 h #h)"},
		{"====== h", "(H4 h #h)"},
		{"======= h", "(H5 h #h)"},
		{"======== h", "(H5 h #h)"},
		{"=", "(PARA =)"},
		{"=== h=__=a__", "(H1 h= {_ =a} #h-a)"},
		{"=\n", "(PARA =)"},
		{"a=", "(PARA a=)"},
		{" =", "(PARA =)"},
		{"=== h\na", "(H1 h #h)(PARA a)"},
		{"=== h i {-}", "(H1 h SP i #h-i)[ATTR -]"},
		{"=== h {{a}}", "(H1 h SP (EMBED a) #h)"},
		{"=== h{{a}}", "(H1 h (EMBED a) #h)"},
		{"=== {{a}}", "(H1 (EMBED a))"},
		{"=== h {{a}}{-}", "(H1 h SP (EMBED a)[ATTR -] #h)"},
		{"=== h {{a}} {-}", "(H1 h SP (EMBED a) #h)[ATTR -]"},
		{"=== h {-}{{a}}", "(H1 h #h)[ATTR -]"},
		{"=== h{id=abc}", "(H1 h #h)[ATTR id=abc]"},
		{"=== h\n=== h", "(H1 h #h)(H1 h #h-1)"},
	})
}

func TestHRule(t *testing.T) {







New (lines 523-559):
	}))
}

func TestHeading(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"=h", "(PARA =h)"},
		{"= h", "(PARA = h)"},
		{"==h", "(PARA ==h)"},
		{"== h", "(PARA == h)"},
		{"===h", "(PARA ===h)"},
		{"=== h", "(H1 h #h)"},
		{"===  h", "(H1 h #h)"},
		{"==== h", "(H2 h #h)"},
		{"===== h", "(H3 h #h)"},
		{"====== h", "(H4 h #h)"},
		{"======= h", "(H5 h #h)"},
		{"======== h", "(H5 h #h)"},
		{"=", "(PARA =)"},
		{"=== h=__=a__", "(H1 h= {_ =a} #h-a)"},
		{"=\n", "(PARA =)"},
		{"a=", "(PARA a=)"},
		{" =", "(PARA =)"},
		{"=== h\na", "(H1 h #h)(PARA a)"},
		{"=== h i {-}", "(H1 h i #h-i)[ATTR -]"},
		{"=== h {{a}}", "(H1 h  (EMBED a) #h)"},
		{"=== h{{a}}", "(H1 h (EMBED a) #h)"},
		{"=== {{a}}", "(H1 (EMBED a))"},
		{"=== h {{a}}{-}", "(H1 h  (EMBED a)[ATTR -] #h)"},
		{"=== h {{a}} {-}", "(H1 h  (EMBED a) #h)[ATTR -]"},
		{"=== h {-}{{a}}", "(H1 h #h)[ATTR -]"},
		{"=== h{id=abc}", "(H1 h #h)[ATTR id=abc]"},
		{"=== h\n=== h", "(H1 h #h)(H1 h #h-1)"},
	})
}

func TestHRule(t *testing.T) {
Old (lines 617-631):
		{">", "(QL {})"},
	})
}

func TestQuoteList(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"> w1 w2", "(QL {(PARA w1 SP w2)})"},
		{"> w1\n> w2", "(QL {(PARA w1 SB w2)})"},
		{"> w1\n>\n>w2", "(QL {(PARA w1)} {})(PARA >w2)"},
	})
}

func TestEnumAfterPara(t *testing.T) {
	t.Parallel()







New (lines 617-631):
		{">", "(QL {})"},
	})
}

func TestQuoteList(t *testing.T) {
	t.Parallel()
	checkTcs(t, TestCases{
		{"> w1 w2", "(QL {(PARA w1 w2)})"},
		{"> w1\n> w2", "(QL {(PARA w1 SB w2)})"},
		{"> w1\n>\n>w2", "(QL {(PARA w1)} {})(PARA >w2)"},
	})
}

func TestEnumAfterPara(t *testing.T) {
	t.Parallel()
Old (lines 642-656):
		{"; ", "(PARA ;)"},
		{"; abc", "(DL (DT abc))"},
		{"; abc\ndef", "(DL (DT abc))(PARA def)"},
		{"; abc\n def", "(DL (DT abc))(PARA def)"},
		{"; abc\n  def", "(DL (DT abc SB def))"},
		{":", "(PARA :)"},
		{": ", "(PARA :)"},
		{": abc", "(PARA : SP abc)"},
		{"; abc\n: def", "(DL (DT abc) (DD (PARA def)))"},
		{"; abc\n: def\nghi", "(DL (DT abc) (DD (PARA def)))(PARA ghi)"},
		{"; abc\n: def\n ghi", "(DL (DT abc) (DD (PARA def)))(PARA ghi)"},
		{"; abc\n: def\n  ghi", "(DL (DT abc) (DD (PARA def SB ghi)))"},
		{"; abc\n: def\n\n  ghi", "(DL (DT abc) (DD (PARA def)(PARA ghi)))"},
		{"; abc\n:", "(DL (DT abc))(PARA :)"},
		{"; abc\n: def\n: ghi", "(DL (DT abc) (DD (PARA def)) (DD (PARA ghi)))"},







New (lines 642-656):
		{"; ", "(PARA ;)"},
		{"; abc", "(DL (DT abc))"},
		{"; abc\ndef", "(DL (DT abc))(PARA def)"},
		{"; abc\n def", "(DL (DT abc))(PARA def)"},
		{"; abc\n  def", "(DL (DT abc SB def))"},
		{":", "(PARA :)"},
		{": ", "(PARA :)"},
		{": abc", "(PARA : abc)"},
		{"; abc\n: def", "(DL (DT abc) (DD (PARA def)))"},
		{"; abc\n: def\nghi", "(DL (DT abc) (DD (PARA def)))(PARA ghi)"},
		{"; abc\n: def\n ghi", "(DL (DT abc) (DD (PARA def)))(PARA ghi)"},
		{"; abc\n: def\n  ghi", "(DL (DT abc) (DD (PARA def SB ghi)))"},
		{"; abc\n: def\n\n  ghi", "(DL (DT abc) (DD (PARA def)(PARA ghi)))"},
		{"; abc\n:", "(DL (DT abc))(PARA :)"},
		{"; abc\n: def\n: ghi", "(DL (DT abc) (DD (PARA def)) (DD (PARA ghi)))"},
Old (lines 746-760):
	})
	checkTcs(t, replace("\"", TestCases{
		{"::a::{py=3}", "(PARA {: a}[ATTR py=3])"},
		{"::a::{py=$2 3$}", "(PARA {: a}[ATTR py=$2 3$])"},
		{"::a::{py=$2\\$3$}", "(PARA {: a}[ATTR py=2$3])"},
		{"::a::{py=2$3}", "(PARA {: a}[ATTR py=2$3])"},
		{"::a::{py=$2\n3$}", "(PARA {: a}[ATTR py=$2\n3$])"},
		{"::a::{py=$2 3}", "(PARA {: a} {py=$2 SP 3})"},

		{"::a::{py=2 py=3}", "(PARA {: a}[ATTR py=$2 3$])"},
		{"::a::{.go .py}", "(PARA {: a}[ATTR class=$go py$])"},
	}))
}

func TestTemp(t *testing.T) {







New (lines 746-760):
	})
	checkTcs(t, replace("\"", TestCases{
		{"::a::{py=3}", "(PARA {: a}[ATTR py=3])"},
		{"::a::{py=$2 3$}", "(PARA {: a}[ATTR py=$2 3$])"},
		{"::a::{py=$2\\$3$}", "(PARA {: a}[ATTR py=2$3])"},
		{"::a::{py=2$3}", "(PARA {: a}[ATTR py=2$3])"},
		{"::a::{py=$2\n3$}", "(PARA {: a}[ATTR py=$2\n3$])"},
		{"::a::{py=$2 3}", "(PARA {: a} {py=$2 3})"},

		{"::a::{py=2 py=3}", "(PARA {: a}[ATTR py=$2 3$])"},
		{"::a::{.go .py}", "(PARA {: a}[ATTR class=$go py$])"},
	}))
}

func TestTemp(t *testing.T) {
Old (lines 877-896):
		tv.visitAttributes(n.Attrs)
	case *ast.BLOBNode:
		tv.sb.WriteString("(BLOB ")
		tv.sb.WriteString(n.Syntax)
		tv.sb.WriteString(")")
	case *ast.TextNode:
		tv.sb.WriteString(n.Text)
	case *ast.SpaceNode:
		if l := n.Count(); l == 1 {
			tv.sb.WriteString("SP")
		} else {
			fmt.Fprintf(&tv.sb, "SP%d", l)
		}
	case *ast.BreakNode:
		if n.Hard {
			tv.sb.WriteString("HB")
		} else {
			tv.sb.WriteString("SB")
		}
	case *ast.LinkNode:







New (lines 877-890):
		tv.visitAttributes(n.Attrs)
	case *ast.BLOBNode:
		tv.sb.WriteString("(BLOB ")
		tv.sb.WriteString(n.Syntax)
		tv.sb.WriteString(")")
	case *ast.TextNode:
		tv.sb.WriteString(n.Text)
	case *ast.BreakNode:
		if n.Hard {
			tv.sb.WriteString("HB")
		} else {
			tv.sb.WriteString("SB")
		}
	case *ast.LinkNode:

Changes to query/context.go.

Old (lines 115-145):
	old[n-1].meta = nil // avoid memory leak
	*q = old[0 : n-1]
	return item
}

type contextTask struct {
	port     ContextPort
	seen     id.Set
	queue    ztlCtxQueue
	maxCost  float64
	limit    int
	tagMetas map[string][]*meta.Meta
	tagZids  map[string]id.Set     // just the zids of tagMetas
	metaZid  map[id.Zid]*meta.Meta // maps zid to meta for all meta retrieved with tags
}

func newQueue(startSeq []*meta.Meta, maxCost float64, limit int, port ContextPort) *contextTask {
	result := &contextTask{
		port:     port,
		seen:     id.NewSet(),
		maxCost:  maxCost,
		limit:    limit,
		tagMetas: make(map[string][]*meta.Meta),
		tagZids:  make(map[string]id.Set),
		metaZid:  make(map[id.Zid]*meta.Meta),
	}

	queue := make(ztlCtxQueue, 0, len(startSeq))
	for _, m := range startSeq {
		queue = append(queue, ztlCtxItem{cost: 1, meta: m})
	}







New (lines 115-145):
	old[n-1].meta = nil // avoid memory leak
	*q = old[0 : n-1]
	return item
}

type contextTask struct {
	port     ContextPort
	seen     *id.Set
	queue    ztlCtxQueue
	maxCost  float64
	limit    int
	tagMetas map[string][]*meta.Meta
	tagZids  map[string]*id.Set    // just the zids of tagMetas
	metaZid  map[id.Zid]*meta.Meta // maps zid to meta for all meta retrieved with tags
}

func newQueue(startSeq []*meta.Meta, maxCost float64, limit int, port ContextPort) *contextTask {
	result := &contextTask{
		port:     port,
		seen:     id.NewSet(),
		maxCost:  maxCost,
		limit:    limit,
		tagMetas: make(map[string][]*meta.Meta),
		tagZids:  make(map[string]*id.Set),
		metaZid:  make(map[id.Zid]*meta.Meta),
	}

	queue := make(ztlCtxQueue, 0, len(startSeq))
	for _, m := range startSeq {
		queue = append(queue, ztlCtxItem{cost: 1, meta: m})
	}
Old (lines 196-253):
	}
}

func (ct *contextTask) addMeta(m *meta.Meta, newCost float64) {
	// If len(zc.seen) <= 1, the initial zettel is processed. In this case allow all
	// other zettel that are directly reachable, without taking the cost into account.
	// Of course, the limit ist still relevant.
	if !ct.hasLimit() && (len(ct.seen) <= 1 || ct.maxCost == 0 || newCost <= ct.maxCost) {
		if _, found := ct.seen[m.Zid]; !found {
			heap.Push(&ct.queue, ztlCtxItem{cost: newCost, meta: m})
		}
	}
}

func (ct *contextTask) addIDSet(ctx context.Context, newCost float64, value string) {
	elems := meta.ListFromValue(value)
	refCost := referenceCost(newCost, len(elems))
	for _, val := range elems {
		ct.addID(ctx, refCost, val)
	}
}

func referenceCost(baseCost float64, numReferences int) float64 {
	nRefs := float64(numReferences)
	return nRefs*math.Log2(nRefs+1) + baseCost
}

func (ct *contextTask) addTags(ctx context.Context, tags []string, baseCost float64) {
	var zidSet id.Set
	for _, tag := range tags {
		zs := ct.updateTagData(ctx, tag)
		zidSet = zidSet.Copy(zs)
	}
	for _, zid := range zidSet.Sorted() { // .Sorted() to stay deterministic
		minCost := math.MaxFloat64
		costFactor := 1.1
		for _, tag := range tags {
			tagZids := ct.tagZids[tag]
			if tagZids.Contains(zid) {
				cost := tagCost(baseCost, len(tagZids))
				if cost < minCost {
					minCost = cost
				}
				costFactor /= 1.1
			}
		}
		ct.addMeta(ct.metaZid[zid], minCost*costFactor)
	}
}

func (ct *contextTask) updateTagData(ctx context.Context, tag string) id.Set {
	if _, found := ct.tagMetas[tag]; found {
		return ct.tagZids[tag]
	}
	q := Parse(api.KeyTags + api.SearchOperatorHas + tag + " ORDER REVERSE " + api.KeyID)
	ml, err := ct.port.SelectMeta(ctx, nil, q)
	if err != nil {
		ml = nil







New (lines 196-253):
	}
}

func (ct *contextTask) addMeta(m *meta.Meta, newCost float64) {
	// If len(zc.seen) <= 1, the initial zettel is processed. In this case allow all
	// other zettel that are directly reachable, without taking the cost into account.
	// Of course, the limit ist still relevant.
	if !ct.hasLimit() && (ct.seen.Length() <= 1 || ct.maxCost == 0 || newCost <= ct.maxCost) {
		if ct.seen.Contains(m.Zid) {
			heap.Push(&ct.queue, ztlCtxItem{cost: newCost, meta: m})
		}
	}
}

func (ct *contextTask) addIDSet(ctx context.Context, newCost float64, value string) {
	elems := meta.ListFromValue(value)
	refCost := referenceCost(newCost, len(elems))
	for _, val := range elems {
		ct.addID(ctx, refCost, val)
	}
}

func referenceCost(baseCost float64, numReferences int) float64 {
	nRefs := float64(numReferences)
	return nRefs*math.Log2(nRefs+1) + baseCost
}

func (ct *contextTask) addTags(ctx context.Context, tags []string, baseCost float64) {
	var zidSet *id.Set
	for _, tag := range tags {
		zs := ct.updateTagData(ctx, tag)
		zidSet = zidSet.IUnion(zs)
	}
	zidSet.ForEach(func(zid id.Zid) {
		minCost := math.MaxFloat64
		costFactor := 1.1
		for _, tag := range tags {
			tagZids := ct.tagZids[tag]
			if tagZids.Contains(zid) {
				cost := tagCost(baseCost, tagZids.Length())
				if cost < minCost {
					minCost = cost
				}
				costFactor /= 1.1
			}
		}
		ct.addMeta(ct.metaZid[zid], minCost*costFactor)
	})
}

func (ct *contextTask) updateTagData(ctx context.Context, tag string) *id.Set {
	if _, found := ct.tagMetas[tag]; found {
		return ct.tagZids[tag]
	}
	q := Parse(api.KeyTags + api.SearchOperatorHas + tag + " ORDER REVERSE " + api.KeyID)
	ml, err := ct.port.SelectMeta(ctx, nil, q)
	if err != nil {
		ml = nil
Old (lines 274-293):
	if ct.hasLimit() {
		return nil, -1
	}
	for len(ct.queue) > 0 {
		item := heap.Pop(&ct.queue).(ztlCtxItem)
		m := item.meta
		zid := m.Zid
		if _, found := ct.seen[zid]; found {
			continue
		}
		ct.seen.Add(zid)
		return m, item.cost
	}
	return nil, -1
}

func (ct *contextTask) hasLimit() bool {
	limit := ct.limit
	return limit > 0 && len(ct.seen) >= limit
}







New (lines 274-293):
	if ct.hasLimit() {
		return nil, -1
	}
	for len(ct.queue) > 0 {
		item := heap.Pop(&ct.queue).(ztlCtxItem)
		m := item.meta
		zid := m.Zid
		if ct.seen.Contains(zid) {
			continue
		}
		ct.seen.Add(zid)
		return m, item.cost
	}
	return nil, -1
}

func (ct *contextTask) hasLimit() bool {
	limit := ct.limit
	return limit > 0 && ct.seen.Length() >= limit
}
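
A minimal sketch of the pointer-based id.Set API this check-in switches to: NewSet, Add, Contains, Length and ForEach replace the former map-style access. Method names are taken from the code above; the helper itself is illustrative.

package example

import "zettelstore.de/z/zettel/id"

// collect gathers identifiers into a set using the pointer-based API.
func collect(zids []id.Zid) *id.Set {
	result := id.NewSet()
	for _, zid := range zids {
		result = result.Add(zid) // Add returns the set, as in the code above
	}
	return result
}

// result.Contains(zid) replaces the former map lookup,
// result.Length() the former len(set), and
// result.ForEach(func(zid id.Zid) { ... }) the former range over the map.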

Changes to query/parser.go.

Old (lines 82-96):
	for {
		pos := inp.Pos
		zid, found := ps.scanZid()
		if !found {
			inp.SetPos(pos)
			break
		}
		if !zidSet.ContainsOrNil(zid) {
			zidSet.Add(zid)
			q = createIfNeeded(q)
			q.zids = append(q.zids, zid)
		}
		ps.skipSpace()
		if ps.mustStop() {
			q.zids = nil







New (lines 82-96):
	for {
		pos := inp.Pos
		zid, found := ps.scanZid()
		if !found {
			inp.SetPos(pos)
			break
		}
		if !zidSet.Contains(zid) {
			zidSet.Add(zid)
			q = createIfNeeded(q)
			q.zids = append(q.zids, zid)
		}
		ps.skipSpace()
		if ps.mustStop() {
			q.zids = nil

Changes to query/query.go.

Old (lines 23-49):
	"zettelstore.de/z/zettel/meta"
)

// Searcher is used to select zettel identifier based on search criteria.
type Searcher interface {
	// Select all zettel that contains the given exact word.
	// The word must be normalized through Unicode NKFD, trimmed and not empty.
	SearchEqual(word string) id.Set

	// Select all zettel that have a word with the given prefix.
	// The prefix must be normalized through Unicode NKFD, trimmed and not empty.
	SearchPrefix(prefix string) id.Set

	// Select all zettel that have a word with the given suffix.
	// The suffix must be normalized through Unicode NKFD, trimmed and not empty.
	SearchSuffix(suffix string) id.Set

	// Select all zettel that contains the given string.
	// The string must be normalized through Unicode NKFD, trimmed and not empty.
	SearchContains(s string) id.Set
}

// Query specifies a mechanism for querying zettel.
type Query struct {
	// Präfixed zettel identifier.
	zids []id.Zid








New (lines 23-49):
	"zettelstore.de/z/zettel/meta"
)

// Searcher is used to select zettel identifier based on search criteria.
type Searcher interface {
	// Select all zettel that contains the given exact word.
	// The word must be normalized through Unicode NKFD, trimmed and not empty.
	SearchEqual(word string) *id.Set

	// Select all zettel that have a word with the given prefix.
	// The prefix must be normalized through Unicode NKFD, trimmed and not empty.
	SearchPrefix(prefix string) *id.Set

	// Select all zettel that have a word with the given suffix.
	// The suffix must be normalized through Unicode NKFD, trimmed and not empty.
	SearchSuffix(suffix string) *id.Set

	// Select all zettel that contains the given string.
	// The string must be normalized through Unicode NKFD, trimmed and not empty.
	SearchContains(s string) *id.Set
}

// Query specifies a mechanism for querying zettel.
type Query struct {
	// Präfixed zettel identifier.
	zids []id.Zid

Old (lines 407-446):
			cTerm.Match = matchAlways
		}
		result.Terms = append(result.Terms, cTerm)
	}
	return result
}

func metaList2idSet(ml []*meta.Meta) id.Set {
	if ml == nil {
		return nil
	}
	result := id.NewSetCap(len(ml))
	for _, m := range ml {
		result = result.Add(m.Zid)
	}
	return result
}

func (ct *conjTerms) retrieveAndCompileTerm(searcher Searcher, startSet id.Set) CompiledTerm {
	match := ct.compileMeta() // Match might add some searches
	var pred RetrievePredicate
	if searcher != nil {
		pred = ct.retrieveIndex(searcher)
		if startSet != nil {
			if pred == nil {
				pred = startSet.ContainsOrNil
			} else {
				predSet := id.NewSetCap(len(startSet))
				for zid := range startSet {
					if pred(zid) {
						predSet = predSet.Add(zid)
					}
				}
				pred = predSet.ContainsOrNil
			}
		}
	}
	return CompiledTerm{Match: match, Retrieve: pred}
}








New (lines 407-446):
			cTerm.Match = matchAlways
		}
		result.Terms = append(result.Terms, cTerm)
	}
	return result
}

func metaList2idSet(ml []*meta.Meta) *id.Set {
	if ml == nil {
		return nil
	}
	result := id.NewSetCap(len(ml))
	for _, m := range ml {
		result = result.Add(m.Zid)
	}
	return result
}

func (ct *conjTerms) retrieveAndCompileTerm(searcher Searcher, startSet *id.Set) CompiledTerm {
	match := ct.compileMeta() // Match might add some searches
	var pred RetrievePredicate
	if searcher != nil {
		pred = ct.retrieveIndex(searcher)
		if startSet != nil {
			if pred == nil {
				pred = startSet.ContainsOrNil
			} else {
				predSet := id.NewSetCap(startSet.Length())
				startSet.ForEach(func(zid id.Zid) {
					if pred(zid) {
						predSet = predSet.Add(zid)
					}
				})
				pred = predSet.ContainsOrNil
			}
		}
	}
	return CompiledTerm{Match: match, Retrieve: pred}
}

Old (lines 457-487):
	positives := retrievePositives(normCalls, plainCalls)
	if positives == nil {
		// No positive search for words, must contain only words for a negative search.
		// Otherwise len(search) == 0 (see above)
		negatives := retrieveNegatives(negCalls)
		return func(zid id.Zid) bool { return !negatives.ContainsOrNil(zid) }
	}
	if len(positives) == 0 {
		// Positive search didn't found anything. We can omit the negative search.
		return neverIncluded
	}
	if len(negCalls) == 0 {
		// Positive search found something, but there is no negative search.
		return positives.ContainsOrNil
	}
	negatives := retrieveNegatives(negCalls)
	if negatives == nil {
		return positives.ContainsOrNil
	}
	return func(zid id.Zid) bool {
		return positives.ContainsOrNil(zid) && !negatives.ContainsOrNil(zid)
	}
}

// Limit returns only s.GetLimit() elements of the given list.
func (q *Query) Limit(metaList []*meta.Meta) []*meta.Meta {
	if q == nil {
		return metaList
	}
	return limitElements(metaList, q.limit)
}







New (lines 457-487):
	positives := retrievePositives(normCalls, plainCalls)
	if positives == nil {
		// No positive search for words, must contain only words for a negative search.
		// Otherwise len(search) == 0 (see above)
		negatives := retrieveNegatives(negCalls)
		return func(zid id.Zid) bool { return !negatives.ContainsOrNil(zid) }
	}
	if positives.IsEmpty() {
		// Positive search didn't found anything. We can omit the negative search.
		return neverIncluded
	}
	if len(negCalls) == 0 {
		// Positive search found something, but there is no negative search.
		return positives.Contains
	}
	negatives := retrieveNegatives(negCalls)
	if negatives == nil {
		return positives.Contains
	}
	return func(zid id.Zid) bool {
		return positives.Contains(zid) && !negatives.ContainsOrNil(zid)
	}
}

// Limit returns only s.GetLimit() elements of the given list.
func (q *Query) Limit(metaList []*meta.Meta) []*meta.Meta {
	if q == nil {
		return metaList
	}
	return limitElements(metaList, q.limit)
}

Changes to query/retrieve.go.

Old (lines 23-37):
	"zettelstore.de/z/zettel/id"
)

type searchOp struct {
	s  string
	op compareOp
}
type searchFunc func(string) id.Set
type searchCallMap map[searchOp]searchFunc

var cmpPred = map[compareOp]func(string, string) bool{
	cmpEqual:   stringEqual,
	cmpPrefix:  strings.HasPrefix,
	cmpSuffix:  strings.HasSuffix,
	cmpMatch:   strings.Contains,







New (lines 23-37):
	"zettelstore.de/z/zettel/id"
)

type searchOp struct {
	s  string
	op compareOp
}
type searchFunc func(string) *id.Set
type searchCallMap map[searchOp]searchFunc

var cmpPred = map[compareOp]func(string, string) bool{
	cmpEqual:   stringEqual,
	cmpPrefix:  strings.HasPrefix,
	cmpSuffix:  strings.HasSuffix,
	cmpMatch:   strings.Contains,
Old (lines 100-161):
		if _, found := plainCalls[val]; found {
			return true
		}
	}
	return false
}

func retrievePositives(normCalls, plainCalls searchCallMap) id.Set {
	if isSuperset(normCalls, plainCalls) {
		var normResult id.Set
		for c, sf := range normCalls {
			normResult = normResult.IntersectOrSet(sf(c.s))
		}
		return normResult
	}

	type searchResults map[searchOp]id.Set
	var cache searchResults
	var plainResult id.Set
	for c, sf := range plainCalls {
		result := sf(c.s)
		if _, found := normCalls[c]; found {
			if cache == nil {
				cache = make(searchResults)
			}
			cache[c] = result
		}
		plainResult = plainResult.IntersectOrSet(result)
	}
	var normResult id.Set
	for c, sf := range normCalls {
		if cache != nil {
			if result, found := cache[c]; found {
				normResult = normResult.IntersectOrSet(result)
				continue
			}
		}
		normResult = normResult.IntersectOrSet(sf(c.s))
	}
	return normResult.Copy(plainResult)
}

func isSuperset(normCalls, plainCalls searchCallMap) bool {
	for c := range plainCalls {
		if _, found := normCalls[c]; !found {
			return false
		}
	}
	return true
}

func retrieveNegatives(negCalls searchCallMap) id.Set {
	var negatives id.Set
	for val, sf := range negCalls {
		negatives = negatives.Copy(sf(val.s))
	}
	return negatives
}

func getSearchFunc(searcher Searcher, op compareOp) searchFunc {
	switch op {
	case cmpEqual:







New (lines 100-161):
		if _, found := plainCalls[val]; found {
			return true
		}
	}
	return false
}

func retrievePositives(normCalls, plainCalls searchCallMap) *id.Set {
	if isSuperset(normCalls, plainCalls) {
		var normResult *id.Set
		for c, sf := range normCalls {
			normResult = normResult.IntersectOrSet(sf(c.s))
		}
		return normResult
	}

	type searchResults map[searchOp]*id.Set
	var cache searchResults
	var plainResult *id.Set
	for c, sf := range plainCalls {
		result := sf(c.s)
		if _, found := normCalls[c]; found {
			if cache == nil {
				cache = make(searchResults)
			}
			cache[c] = result
		}
		plainResult = plainResult.IntersectOrSet(result)
	}
	var normResult *id.Set
	for c, sf := range normCalls {
		if cache != nil {
			if result, found := cache[c]; found {
				normResult = normResult.IntersectOrSet(result)
				continue
			}
		}
		normResult = normResult.IntersectOrSet(sf(c.s))
	}
	return normResult.IUnion(plainResult)
}

func isSuperset(normCalls, plainCalls searchCallMap) bool {
	for c := range plainCalls {
		if _, found := normCalls[c]; !found {
			return false
		}
	}
	return true
}

func retrieveNegatives(negCalls searchCallMap) *id.Set {
	var negatives *id.Set
	for val, sf := range negCalls {
		negatives = negatives.IUnion(sf(val.s))
	}
	return negatives
}

func getSearchFunc(searcher Searcher, op compareOp) searchFunc {
	switch op {
	case cmpEqual:
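
A minimal sketch (not part of this check-in) of the fold pattern the new retrievePositives uses: the accumulator starts as a nil *id.Set, so the first IntersectOrSet call simply adopts the first result, and every further call narrows it in place. The package name and the lookup parameter are hypothetical stand-ins for a searchFunc.

package retrievesketch // hypothetical example package, for illustration only

import "zettelstore.de/z/zettel/id"

// intersectAll folds one result set per search term into a single set.
// A nil accumulator adopts the first result; later calls intersect in place.
func intersectAll(lookup func(string) *id.Set, terms []string) *id.Set {
	var result *id.Set
	for _, term := range terms {
		result = result.IntersectOrSet(lookup(term))
	}
	return result
}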

Changes to strfun/slugify_test.go.

Old version (lines 34-78):
		if got := strfun.Slugify(test.in); got != test.exp {
			t.Errorf("%q: %q != %q", test.in, got, test.exp)
		}
	}
}

func eqStringSlide(got, exp []string) bool {
	if len(got) != len(exp) {
		return false
	}
	for i, g := range got {
		if g != exp[i] {
			return false
		}
	}
	return true
}

func TestNormalizeWord(t *testing.T) {
	t.Parallel()
	tests := []struct {
		in  string
		exp []string
	}{
		{"", []string{}},
		{" ", []string{}},
		{"Ë‹", []string{}}, // No single diacritic char, such as U+02CB
		{"simple test", []string{"simple", "test"}},
		{"I'm a go developer", []string{"i", "m", "a", "go", "developer"}},
		{"-!->simple   test<-!-", []string{"simple", "test"}},
		{"äöüÄÖÜß", []string{"aouaouß"}},
		{"\"aèf", []string{"aef"}},
		{"a#b", []string{"a", "b"}},
		{"*", []string{}},
		{"123", []string{"123"}},
		{"1²3", []string{"123"}},
		{"Period.", []string{"period"}},
		{" WORD  NUMBER ", []string{"word", "number"}},
	}
	for _, test := range tests {
		if got := strfun.NormalizeWords(test.in); !eqStringSlide(got, test.exp) {
			t.Errorf("%q: %q != %q", test.in, got, test.exp)
		}
	}
}

New version (lines 34-81):
		if got := strfun.Slugify(test.in); got != test.exp {
			t.Errorf("%q: %q != %q", test.in, got, test.exp)
		}
	}
}

func eqStringSlide(got, exp []string) bool {
	if got == nil {
		return exp == nil
	}
	if len(got) != len(exp) {
		return false
	}
	for i, g := range got {
		if g != exp[i] {
			return false
		}
	}
	return true
}

func TestNormalizeWord(t *testing.T) {
	t.Parallel()
	tests := []struct {
		in  string
		exp []string
	}{
		{"", nil},
		{" ", nil},
		{"Ë‹", nil}, // No single diacritic char, such as U+02CB
		{"simple test", []string{"simple", "test"}},
		{"I'm a go developer", []string{"i", "m", "a", "go", "developer"}},
		{"-!->simple   test<-!-", []string{"simple", "test"}},
		{"äöüÄÖÜß", []string{"aouaouß"}},
		{"\"aèf", []string{"aef"}},
		{"a#b", []string{"a", "b"}},
		{"*", nil},
		{"123", []string{"123"}},
		{"1²3", []string{"123"}},
		{"Period.", []string{"period"}},
		{" WORD  NUMBER ", []string{"word", "number"}},
	}
	for _, test := range tests {
		if got := strfun.NormalizeWords(test.in); !eqStringSlide(got, test.exp) {
			t.Errorf("%q: %q != %q", test.in, got, test.exp)
		}
	}
}

Changes to usecase/query.go.

Old version (lines 170-187):
			}
		}
	}
	candidates = filterByZid(candidates, refZids)
	return uc.filterCandidates(ctx, candidates, words)
}

func filterByZid(candidates []*meta.Meta, ignoreSeq id.Set) []*meta.Meta {
	result := make([]*meta.Meta, 0, len(candidates))
	for _, m := range candidates {
		if !ignoreSeq.ContainsOrNil(m.Zid) {
			result = append(result, m)
		}
	}
	return result
}

func (uc *Query) filterCandidates(ctx context.Context, candidates []*meta.Meta, words []string) []*meta.Meta {

New version (lines 170-187):
			}
		}
	}
	candidates = filterByZid(candidates, refZids)
	return uc.filterCandidates(ctx, candidates, words)
}

func filterByZid(candidates []*meta.Meta, ignoreSeq *id.Set) []*meta.Meta {
	result := make([]*meta.Meta, 0, len(candidates))
	for _, m := range candidates {
		if !ignoreSeq.Contains(m.Zid) {
			result = append(result, m)
		}
	}
	return result
}

func (uc *Query) filterCandidates(ctx context.Context, candidates []*meta.Meta, words []string) []*meta.Meta {

Old version (lines 260-279):
func (v *unlinkedVisitor) splitInlineTextList(is *ast.InlineSlice) []string {
	var result []string
	var curList []string
	for _, in := range *is {
		switch n := in.(type) {
		case *ast.TextNode:
			curList = append(curList, strfun.MakeWords(n.Text)...)
		case *ast.SpaceNode:
		default:
			if curList != nil {
				result = append(result, v.joinWords(curList))
				curList = nil
			}
		}
	}
	if curList != nil {
		result = append(result, v.joinWords(curList))
	}
	return result
}

New version (lines 260-278):
func (v *unlinkedVisitor) splitInlineTextList(is *ast.InlineSlice) []string {
	var result []string
	var curList []string
	for _, in := range *is {
		switch n := in.(type) {
		case *ast.TextNode:
			curList = append(curList, strfun.MakeWords(n.Text)...)
		default:
			if curList != nil {
				result = append(result, v.joinWords(curList))
				curList = nil
			}
		}
	}
	if curList != nil {
		result = append(result, v.joinWords(curList))
	}
	return result
}

Changes to web/adapter/webui/sxn_code.go.

Old version (lines 57-71):
type getMetaFunc func(context.Context, id.Zid) (*meta.Meta, error)

func buildSxnCodeDigraph(ctx context.Context, startZid id.Zid, getMeta getMetaFunc) id.Digraph {
	m, err := getMeta(ctx, startZid)
	if err != nil {
		return nil
	}
	var marked id.Set
	stack := []*meta.Meta{m}
	dg := id.Digraph(nil).AddVertex(startZid)
	for pos := len(stack) - 1; pos >= 0; pos = len(stack) - 1 {
		curr := stack[pos]
		stack = stack[:pos]
		if marked.Contains(curr.Zid) {
			continue

New version (lines 57-71):
type getMetaFunc func(context.Context, id.Zid) (*meta.Meta, error)

func buildSxnCodeDigraph(ctx context.Context, startZid id.Zid, getMeta getMetaFunc) id.Digraph {
	m, err := getMeta(ctx, startZid)
	if err != nil {
		return nil
	}
	var marked *id.Set
	stack := []*meta.Meta{m}
	dg := id.Digraph(nil).AddVertex(startZid)
	for pos := len(stack) - 1; pos >= 0; pos = len(stack) - 1 {
		curr := stack[pos]
		stack = stack[:pos]
		if marked.Contains(curr.Zid) {
			continue

Changes to zettel/id/digraph.go.

Old version (lines 15-29):

import (
	"maps"
	"slices"
)

// Digraph relates zettel identifier in a directional way.
type Digraph map[Zid]Set

// AddVertex adds an edge / vertex to the digraph.
func (dg Digraph) AddVertex(zid Zid) Digraph {
	if dg == nil {
		return Digraph{zid: nil}
	}
	if _, found := dg[zid]; !found {

New version (lines 15-29):

import (
	"maps"
	"slices"
)

// Digraph relates zettel identifier in a directional way.
type Digraph map[Zid]*Set

// AddVertex adds an edge / vertex to the digraph.
func (dg Digraph) AddVertex(zid Zid) Digraph {
	if dg == nil {
		return Digraph{zid: nil}
	}
	if _, found := dg[zid]; !found {

Old version (lines 42-56):
	}
}

// AddEdge adds a connection from `zid1` to `zid2`.
// Both vertices must be added before. Otherwise the function may panic.
func (dg Digraph) AddEdge(fromZid, toZid Zid) Digraph {
	if dg == nil {
		return Digraph{fromZid: Set(nil).Add(toZid), toZid: nil}
	}
	dg[fromZid] = dg[fromZid].Add(toZid)
	return dg
}

// AddEgdes adds all given `Edge`s to the digraph.
//

New version (lines 42-56):
	}
}

// AddEdge adds a connection from `zid1` to `zid2`.
// Both vertices must be added before. Otherwise the function may panic.
func (dg Digraph) AddEdge(fromZid, toZid Zid) Digraph {
	if dg == nil {
		return Digraph{fromZid: (*Set)(nil).Add(toZid), toZid: nil}
	}
	dg[fromZid] = dg[fromZid].Add(toZid)
	return dg
}

// AddEgdes adds all given `Edge`s to the digraph.
//

Old version (lines 68-82):
		dg = dg.AddEdge(edge.From, edge.To)
	}
	return dg
}

// Equal returns true if both digraphs have the same vertices and edges.
func (dg Digraph) Equal(other Digraph) bool {
	return maps.EqualFunc(dg, other, func(cg, co Set) bool { return cg.Equal(co) })
}

// Clone a digraph.
func (dg Digraph) Clone() Digraph {
	if len(dg) == 0 {
		return nil
	}

New version (lines 68-82):
		dg = dg.AddEdge(edge.From, edge.To)
	}
	return dg
}

// Equal returns true if both digraphs have the same vertices and edges.
func (dg Digraph) Equal(other Digraph) bool {
	return maps.EqualFunc(dg, other, func(cg, co *Set) bool { return cg.Equal(co) })
}

// Clone a digraph.
func (dg Digraph) Clone() Digraph {
	if len(dg) == 0 {
		return nil
	}

Old version (lines 93-239):
		return false
	}
	_, found := dg[zid]
	return found
}

// Vertices returns the set of all vertices.
func (dg Digraph) Vertices() Set {
	if len(dg) == 0 {
		return nil
	}
	verts := NewSetCap(len(dg))
	for vert := range dg {
		verts.Add(vert)
	}
	return verts
}

// Edges returns an unsorted slice of the edges of the digraph.
func (dg Digraph) Edges() (es EdgeSlice) {
	for vert, closure := range dg {
		for next := range closure {
			es = append(es, Edge{From: vert, To: next})
		}
	}
	return es
}

// Originators will return the set of all vertices that are not referenced
// a the to-part of an edge.
func (dg Digraph) Originators() Set {
	if len(dg) == 0 {
		return nil
	}
	origs := dg.Vertices()
	for _, closure := range dg {
		origs.Substract(closure)
	}
	return origs
}

// Terminators returns the set of all vertices that does not reference
// other vertices.
func (dg Digraph) Terminators() (terms Set) {
	for vert, closure := range dg {
		if len(closure) == 0 {
			terms = terms.Add(vert)
		}
	}
	return terms
}

// TransitiveClosure calculates the sub-graph that is reachable from `zid`.
func (dg Digraph) TransitiveClosure(zid Zid) (tc Digraph) {
	if len(dg) == 0 {
		return nil
	}
	var marked Set
	stack := Slice{zid}
	for pos := len(stack) - 1; pos >= 0; pos = len(stack) - 1 {
		curr := stack[pos]
		stack = stack[:pos]
		if marked.Contains(curr) {
			continue
		}
		tc = tc.AddVertex(curr)
		for next := range dg[curr] {
			tc = tc.AddVertex(next)
			tc = tc.AddEdge(curr, next)
			stack = append(stack, next)
		}
		marked = marked.Add(curr)
	}
	return tc
}

// ReachableVertices calculates the set of all vertices that are reachable
// from the given `zid`.
func (dg Digraph) ReachableVertices(zid Zid) (tc Set) {
	if len(dg) == 0 {
		return nil
	}
	stack := dg[zid].Sorted()
	for last := len(stack) - 1; last >= 0; last = len(stack) - 1 {
		curr := stack[last]
		stack = stack[:last]
		if tc.Contains(curr) {
			continue
		}
		closure, found := dg[curr]
		if !found {
			continue
		}
		tc = tc.Add(curr)
		for next := range closure {
			stack = append(stack, next)
		}
	}
	return tc
}

// IsDAG returns a vertex and false, if the graph has a cycle containing the vertex.
func (dg Digraph) IsDAG() (Zid, bool) {
	for vertex := range dg {
		if dg.ReachableVertices(vertex).Contains(vertex) {
			return vertex, false
		}
	}
	return Invalid, true
}

// Reverse returns a graph with reversed edges.
func (dg Digraph) Reverse() (revDg Digraph) {
	for vertex, closure := range dg {
		revDg = revDg.AddVertex(vertex)
		for next := range closure {
			revDg = revDg.AddVertex(next)
			revDg = revDg.AddEdge(next, vertex)
		}
	}
	return revDg
}

// SortReverse returns a deterministic, topological, reverse sort of the
// digraph.
//
// Works only if digraph is a DAG. Otherwise the algorithm will not terminate
// or returns an arbitrary value.
func (dg Digraph) SortReverse() (sl Slice) {
	if len(dg) == 0 {
		return nil
	}
	tempDg := dg.Clone()
	for len(tempDg) > 0 {
		terms := tempDg.Terminators()
		if len(terms) == 0 {
			break
		}
		termSlice := terms.Sorted()
		slices.Reverse(termSlice)
		sl = append(sl, termSlice...)
		for t := range terms {
			tempDg.RemoveVertex(t)
		}
	}
	return sl
}

New version (lines 93-239):
		return false
	}
	_, found := dg[zid]
	return found
}

// Vertices returns the set of all vertices.
func (dg Digraph) Vertices() *Set {
	if len(dg) == 0 {
		return nil
	}
	verts := NewSetCap(len(dg))
	for vert := range dg {
		verts.Add(vert)
	}
	return verts
}

// Edges returns an unsorted slice of the edges of the digraph.
func (dg Digraph) Edges() (es EdgeSlice) {
	for vert, closure := range dg {
		closure.ForEach(func(next Zid) {
			es = append(es, Edge{From: vert, To: next})
		})
	}
	return es
}

// Originators will return the set of all vertices that are not referenced
// a the to-part of an edge.
func (dg Digraph) Originators() *Set {
	if len(dg) == 0 {
		return nil
	}
	origs := dg.Vertices()
	for _, closure := range dg {
		origs.ISubstract(closure)
	}
	return origs
}

// Terminators returns the set of all vertices that does not reference
// other vertices.
func (dg Digraph) Terminators() (terms *Set) {
	for vert, closure := range dg {
		if closure.IsEmpty() {
			terms = terms.Add(vert)
		}
	}
	return terms
}

// TransitiveClosure calculates the sub-graph that is reachable from `zid`.
func (dg Digraph) TransitiveClosure(zid Zid) (tc Digraph) {
	if len(dg) == 0 {
		return nil
	}
	var marked *Set
	stack := Slice{zid}
	for pos := len(stack) - 1; pos >= 0; pos = len(stack) - 1 {
		curr := stack[pos]
		stack = stack[:pos]
		if marked.Contains(curr) {
			continue
		}
		tc = tc.AddVertex(curr)
		dg[curr].ForEach(func(next Zid) {
			tc = tc.AddVertex(next)
			tc = tc.AddEdge(curr, next)
			stack = append(stack, next)
		})
		marked = marked.Add(curr)
	}
	return tc
}

// ReachableVertices calculates the set of all vertices that are reachable
// from the given `zid`.
func (dg Digraph) ReachableVertices(zid Zid) (tc *Set) {
	if len(dg) == 0 {
		return nil
	}
	stack := dg[zid].SafeSorted()
	for last := len(stack) - 1; last >= 0; last = len(stack) - 1 {
		curr := stack[last]
		stack = stack[:last]
		if tc.Contains(curr) {
			continue
		}
		closure, found := dg[curr]
		if !found {
			continue
		}
		tc = tc.Add(curr)
		closure.ForEach(func(next Zid) {
			stack = append(stack, next)
		})
	}
	return tc
}

// IsDAG returns a vertex and false, if the graph has a cycle containing the vertex.
func (dg Digraph) IsDAG() (Zid, bool) {
	for vertex := range dg {
		if dg.ReachableVertices(vertex).Contains(vertex) {
			return vertex, false
		}
	}
	return Invalid, true
}

// Reverse returns a graph with reversed edges.
func (dg Digraph) Reverse() (revDg Digraph) {
	for vertex, closure := range dg {
		revDg = revDg.AddVertex(vertex)
		closure.ForEach(func(next Zid) {
			revDg = revDg.AddVertex(next)
			revDg = revDg.AddEdge(next, vertex)
		})
	}
	return revDg
}

// SortReverse returns a deterministic, topological, reverse sort of the
// digraph.
//
// Works only if digraph is a DAG. Otherwise the algorithm will not terminate
// or returns an arbitrary value.
func (dg Digraph) SortReverse() (sl Slice) {
	if len(dg) == 0 {
		return nil
	}
	tempDg := dg.Clone()
	for len(tempDg) > 0 {
		terms := tempDg.Terminators()
		if terms.IsEmpty() {
			break
		}
		termSlice := terms.SafeSorted()
		slices.Reverse(termSlice)
		sl = append(sl, termSlice...)
		terms.ForEach(func(t Zid) {
			tempDg.RemoveVertex(t)
		})
	}
	return sl
}
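
The following is an illustrative sketch only (not part of this check-in): it builds a tiny digraph with the API shown above and queries it through the new pointer-based Set. The zettel identifiers 1, 2 and 3 are arbitrary example values.

package main

// Illustrative sketch: exercise the Digraph API from zettel/id/digraph.go.

import (
	"fmt"

	"zettelstore.de/z/zettel/id"
)

func main() {
	// Vertices must exist before edges between them are added.
	dg := id.Digraph(nil).AddVertex(1).AddVertex(2).AddVertex(3)
	dg = dg.AddEdge(1, 2).AddEdge(2, 3).AddEdge(1, 3)

	fmt.Println(dg.Originators())        // {00000000000001}: never the to-part of an edge
	fmt.Println(dg.Terminators())        // {00000000000003}: has no outgoing edges
	fmt.Println(dg.ReachableVertices(1)) // {00000000000002 00000000000003}
	if zid, ok := dg.IsDAG(); !ok {
		fmt.Println("cycle involving", zid)
	}
	fmt.Println(dg.SortReverse()) // reverse topological order: 3 before 2 before 1
}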

Changes to zettel/id/digraph_test.go.

Old version (lines 26-41):
}

func TestDigraphOriginators(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		name string
		dg   id.EdgeSlice
		orig id.Set
		term id.Set
	}{
		{"empty", nil, nil, nil},
		{"single", zps{{0, 1}}, id.NewSet(0), id.NewSet(1)},
		{"chain", zps{{0, 1}, {1, 2}, {2, 3}}, id.NewSet(0), id.NewSet(3)},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {

New version (lines 26-41):
}

func TestDigraphOriginators(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		name string
		dg   id.EdgeSlice
		orig *id.Set
		term *id.Set
	}{
		{"empty", nil, nil, nil},
		{"single", zps{{0, 1}}, id.NewSet(0), id.NewSet(1)},
		{"chain", zps{{0, 1}, {1, 2}, {2, 3}}, id.NewSet(0), id.NewSet(3)},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {

Old version (lines 52-66):

func TestDigraphReachableVertices(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		name  string
		pairs id.EdgeSlice
		start id.Zid
		exp   id.Set
	}{
		{"nil", nil, 0, nil},
		{"0-2", zps{{1, 2}, {2, 3}}, 1, id.NewSet(2, 3)},
		{"1,2", zps{{1, 2}, {2, 3}}, 2, id.NewSet(3)},
		{"0-2,1-2", zps{{1, 2}, {2, 3}, {1, 3}}, 1, id.NewSet(2, 3)},
		{"0-2,1-2/1", zps{{1, 2}, {2, 3}, {1, 3}}, 2, id.NewSet(3)},
		{"0-2,1-2/2", zps{{1, 2}, {2, 3}, {1, 3}}, 3, nil},

New version (lines 52-66):

func TestDigraphReachableVertices(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		name  string
		pairs id.EdgeSlice
		start id.Zid
		exp   *id.Set
	}{
		{"nil", nil, 0, nil},
		{"0-2", zps{{1, 2}, {2, 3}}, 1, id.NewSet(2, 3)},
		{"1,2", zps{{1, 2}, {2, 3}}, 2, id.NewSet(3)},
		{"0-2,1-2", zps{{1, 2}, {2, 3}, {1, 3}}, 1, id.NewSet(2, 3)},
		{"0-2,1-2/1", zps{{1, 2}, {2, 3}, {1, 3}}, 2, id.NewSet(3)},
		{"0-2,1-2/2", zps{{1, 2}, {2, 3}, {1, 3}}, 3, nil},

Changes to zettel/id/set.go.

Old version (lines 10-181):
// SPDX-License-Identifier: EUPL-1.2
// SPDX-FileCopyrightText: 2021-present Detlef Stern
//-----------------------------------------------------------------------------

package id

import (
	"maps"
	"strings"
)

// Set is a set of zettel identifier
type Set map[Zid]struct{}



// String returns a string representation of the map.
func (s Set) String() string {
	if s == nil {
		return "{}"
	}
	var sb strings.Builder
	sb.WriteByte('{')
	for i, zid := range s.Sorted() {
		if i > 0 {
			sb.WriteByte(' ')
		}
		sb.Write(zid.Bytes())
	}
	sb.WriteByte('}')
	return sb.String()
}

// NewSet returns a new set of identifier with the given initial values.
func NewSet(zids ...Zid) Set {
	l := len(zids)
	if l < 8 {
		l = 8
	}
	result := make(Set, l)
	result.CopySlice(zids)
	return result
}


// NewSetCap returns a new set of identifier with the given capacity and initial values.
func NewSetCap(c int, zids ...Zid) Set {
	l := len(zids)
	if c < l {
		c = l
	}
	if c < 8 {
		c = 8
	}
	result := make(Set, c)
	result.CopySlice(zids)
	return result
}

// Clone returns a copy of the given set.
func (s Set) Clone() Set {
	if len(s) == 0 {
		return nil
	}
	return maps.Clone(s)
}

// Add adds a Add to the set.
func (s Set) Add(zid Zid) Set {
	if s == nil {
		return NewSet(zid)
	}
	s[zid] = struct{}{}
	return s
}

// Contains return true if the set is non-nil and the set contains the given Zettel identifier.
func (s Set) Contains(zid Zid) bool {
	if s != nil {
		_, found := s[zid]
		return found
	}
	return false
}

// ContainsOrNil return true if the set is nil or if the set contains the given Zettel identifier.
func (s Set) ContainsOrNil(zid Zid) bool {
	if s != nil {
		_, found := s[zid]
		return found
	}
	return true
}

// Copy adds all member from the other set.
func (s Set) Copy(other Set) Set {
	if s == nil {
		if len(other) == 0 {
			return nil
		}
		s = NewSetCap(len(other))
	}
	maps.Copy(s, other)
	return s
}

// CopySlice adds all identifier of the given slice to the set.
func (s Set) CopySlice(sl Slice) Set {
	if s == nil {
		s = NewSetCap(len(sl))
	}

	for _, zid := range sl {
		s[zid] = struct{}{}
	}
	return s
}

// Sorted returns the set as a sorted slice of zettel identifier.
func (s Set) Sorted() Slice {
	if l := len(s); l > 0 {
		result := make(Slice, 0, l)
		for zid := range s {
			result = append(result, zid)
		}
		result.Sort()
		return result
	}
	return nil
}

// IntersectOrSet removes all zettel identifier that are not in the other set.
// Both sets can be modified by this method. One of them is the set returned.
// It contains the intersection of both, if s is not nil.
//
// If s == nil, then the other set is always returned.
func (s Set) IntersectOrSet(other Set) Set {
	if s == nil {
		return other
	}
	if len(s) > len(other) {
		s, other = other, s
	}
	for zid := range s {
		_, otherOk := other[zid]
		if !otherOk {
			delete(s, zid)
		}
	}
	return s
}

// Substract removes all zettel identifier from 's' that are in the set 'other'.
func (s Set) Substract(other Set) {
	if s == nil || other == nil {
		return
	}
	for zid := range other {
		delete(s, zid)
	}
}

// Remove the identifier from the set.
func (s Set) Remove(zid Zid) Set {
	if len(s) == 0 {
		return nil
	}
	delete(s, zid)
	if len(s) == 0 {
		return nil
	}
	return s
}

// Equal returns true if the other set is equal to the given set.
func (s Set) Equal(other Set) bool { return maps.Equal(s, other) }

New version (lines 10-319):
// SPDX-License-Identifier: EUPL-1.2
// SPDX-FileCopyrightText: 2021-present Detlef Stern
//-----------------------------------------------------------------------------

package id

import (
	"slices"
	"strings"
)

// Set is a set of zettel identifier
type Set struct {
	seq []Zid
}

// String returns a string representation of the set.
func (s *Set) String() string {
	return "{" + s.MetaString() + "}"
}

// MetaString returns a string representation of the set to be stored as metadata.
func (s *Set) MetaString() string {
	if s == nil || len(s.seq) == 0 {
		return ""
	}
	var sb strings.Builder

	for i, zid := range s.seq {
		if i > 0 {
			sb.WriteByte(' ')
		}
		sb.Write(zid.Bytes())
	}

	return sb.String()
}

// NewSet returns a new set of identifier with the given initial values.
func NewSet(zids ...Zid) *Set {
	switch l := len(zids); l {
	case 0:
		return &Set{seq: nil}
	case 1:

		return &Set{seq: []Zid{zids[0]}}
	default:
		result := Set{seq: make(Slice, 0, l)}
		result.AddSlice(zids)
		return &result
	}
}

// NewSetCap returns a new set of identifier with the given capacity and initial values.
func NewSetCap(c int, zids ...Zid) *Set {
	result := Set{seq: make(Slice, 0, max(c, len(zids)))}
	result.AddSlice(zids)

	return &result
}

// IsEmpty returns true, if the set conains no element.
func (s *Set) IsEmpty() bool {
	return s == nil || len(s.seq) == 0
}

// Length returns the number of elements in this set.
func (s *Set) Length() int {
	if s == nil {
		return 0
	}
	return len(s.seq)
}

// Clone returns a copy of the given set.
func (s *Set) Clone() *Set {
	if s == nil || len(s.seq) == 0 {
		return nil
	}
	return &Set{seq: slices.Clone(s.seq)}
}

// Add adds a Add to the set.
func (s *Set) Add(zid Zid) *Set {
	if s == nil {
		return NewSet(zid)
	}
	s.add(zid)
	return s
}

// Contains return true if the set is non-nil and the set contains the given Zettel identifier.
func (s *Set) Contains(zid Zid) bool { return s != nil && s.contains(zid) }

// ContainsOrNil return true if the set is nil or if the set contains the given Zettel identifier.
func (s *Set) ContainsOrNil(zid Zid) bool { return s == nil || s.contains(zid) }

// AddSlice adds all identifier of the given slice to the set.
func (s *Set) AddSlice(sl Slice) *Set {
	if s == nil {
		return NewSet(sl...)
	}
	s.seq = slices.Grow(s.seq, len(sl))
	for _, zid := range sl {
		s.add(zid)
	}
	return s
}

// SafeSorted returns the set as a new sorted slice of zettel identifier.
func (s *Set) SafeSorted() Slice {
	if s == nil {
		return nil
	}
	return slices.Clone(s.seq)
}

// IntersectOrSet removes all zettel identifier that are not in the other set.
// Both sets can be modified by this method. One of them is the set returned.
// It contains the intersection of both, if s is not nil.
//
// If s == nil, then the other set is always returned.
func (s *Set) IntersectOrSet(other *Set) *Set {
	if s == nil || other == nil {
		return other
	}
	topos, spos, opos := 0, 0, 0
	for spos < len(s.seq) && opos < len(other.seq) {
		sz, oz := s.seq[spos], other.seq[opos]
		if sz < oz {
			spos++
			continue
		}
		if sz > oz {
			opos++
			continue
		}
		s.seq[topos] = sz
		topos++
		spos++
		opos++
	}
	s.seq = s.seq[:topos]
	return s
}

// IUnion adds the elements of set other to s.
func (s *Set) IUnion(other *Set) *Set {
	if other == nil || len(other.seq) == 0 {
		return s
	}
	// TODO: if other is large enough (and s is not too small) -> optimize by swapping and/or loop through both
	return s.AddSlice(other.seq)
}

// ISubstract removes all zettel identifier from 's' that are in the set 'other'.
func (s *Set) ISubstract(other *Set) {
	if s == nil || len(s.seq) == 0 || other == nil || len(other.seq) == 0 {
		return
	}
	topos, spos, opos := 0, 0, 0
	for spos < len(s.seq) && opos < len(other.seq) {
		sz, oz := s.seq[spos], other.seq[opos]
		if sz < oz {
			s.seq[topos] = sz
			topos++
			spos++
			continue
		}
		if sz == oz {
			spos++
		}
		opos++
	}
	for spos < len(s.seq) {
		s.seq[topos] = s.seq[spos]
		topos++
		spos++
	}
	s.seq = s.seq[:topos]
}

// Diff returns the difference sets between the two sets: the first difference
// set is the set of elements that are in other, but not in s; the second
// difference set is the set of element that are in s but not in other.
//
// in other words: the first result is the set of elements from other that must
// be added to s; the second result is the set of elements that must be removed
// from s, so that s would have the same elemest as other.
func (s *Set) Diff(other *Set) (newS, remS *Set) {
	if s == nil || len(s.seq) == 0 {
		return other.Clone(), nil
	}
	if other == nil || len(other.seq) == 0 {
		return nil, s.Clone()
	}
	seqS, seqO := s.seq, other.seq
	var newRefs, remRefs Slice
	npos, opos := 0, 0
	for npos < len(seqO) && opos < len(seqS) {
		rn, ro := seqO[npos], seqS[opos]
		if rn == ro {
			npos++
			opos++
			continue
		}
		if rn < ro {
			newRefs = append(newRefs, rn)
			npos++
			continue
		}
		remRefs = append(remRefs, ro)
		opos++
	}
	if npos < len(seqO) {
		newRefs = append(newRefs, seqO[npos:]...)
	}
	if opos < len(seqS) {
		remRefs = append(remRefs, seqS[opos:]...)
	}
	return newFromSlice(newRefs), newFromSlice(remRefs)
}

// Remove the identifier from the set.
func (s *Set) Remove(zid Zid) *Set {
	if s == nil || len(s.seq) == 0 {
		return nil
	}
	if pos, found := s.find(zid); found {
		copy(s.seq[pos:], s.seq[pos+1:])
		s.seq = s.seq[:len(s.seq)-1]
	}
	if len(s.seq) == 0 {
		return nil
	}
	return s
}

// Equal returns true if the other set is equal to the given set.
func (s *Set) Equal(other *Set) bool {
	if s == nil {
		return other == nil
	}
	if other == nil {
		return false
	}
	return slices.Equal(s.seq, other.seq)
}

// ForEach calls the given function for each element of the set.
//
// Every element is bigger than the previous one.
func (s *Set) ForEach(fn func(zid Zid)) {
	if s != nil {
		for _, zid := range s.seq {
			fn(zid)
		}
	}
}

// Pop return one arbitrary element of the set.
func (s *Set) Pop() (Zid, bool) {
	if s != nil {
		if l := len(s.seq); l > 0 {
			zid := s.seq[l-1]
			s.seq = s.seq[:l-1]
			return zid, true
		}
	}
	return Invalid, false
}

// Optimize the amount of memory to store the set.
func (s *Set) Optimize() {
	if s != nil {
		s.seq = slices.Clip(s.seq)
	}
}

// ----- unchecked base operations

func newFromSlice(seq Slice) *Set {
	if l := len(seq); l == 0 {
		return nil
	} else {
		return &Set{seq: seq}
	}
}

func (s *Set) add(zid Zid) {
	if pos, found := s.find(zid); !found {
		s.seq = slices.Insert(s.seq, pos, zid)
	}
}

func (s *Set) contains(zid Zid) bool {
	_, found := s.find(zid)
	return found
}

func (s *Set) find(zid Zid) (int, bool) {
	hi := len(s.seq)
	for lo := 0; lo < hi; {
		m := lo + (hi-lo)/2
		if z := s.seq[m]; z == zid {
			return m, true
		} else if z < zid {
			lo = m + 1
		} else {
			hi = m
		}
	}
	return hi, false
}
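
An illustrative usage sketch (not part of this check-in) of the new slice-backed Set shown above; the identifier values are arbitrary examples.

package main

// Illustrative sketch: exercise the sorted-slice Set from zettel/id/set.go.

import (
	"fmt"

	"zettelstore.de/z/zettel/id"
)

func main() {
	s := id.NewSet(3, 1, 2)                  // stored sorted: 1 2 3
	s = s.IUnion(id.NewSet(2, 4))            // in-place union: 1 2 3 4
	s.ISubstract(id.NewSet(1))               // in-place subtraction: 2 3 4
	s = s.IntersectOrSet(id.NewSet(2, 4, 5)) // in-place intersection: 2 4
	newS, remS := s.Diff(id.NewSet(4, 6))    // newS = {6}: missing in s; remS = {2}: not in the other set
	fmt.Println(s.MetaString())              // 00000000000002 00000000000004
	fmt.Println(newS, remS)
	s.ForEach(func(zid id.Zid) { fmt.Println(zid) }) // visits elements in ascending order
}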

Changes to zettel/id/set_test.go.

Old version (lines 15-152):

import (
	"testing"

	"zettelstore.de/z/zettel/id"
)

func TestSetContains(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s   id.Set
		zid id.Zid
		exp bool
	}{
		{nil, id.Invalid, true},
		{nil, 14, true},
		{id.NewSet(), id.Invalid, false},
		{id.NewSet(), 1, false},
		{id.NewSet(), id.Invalid, false},
		{id.NewSet(1), 1, true},
	}
	for i, tc := range testcases {
		got := tc.s.ContainsOrNil(tc.zid)
		if got != tc.exp {
			t.Errorf("%d: %v.Contains(%v) == %v, but got %v", i, tc.s, tc.zid, tc.exp, got)
		}
	}
}

func TestSetAdd(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 id.Set
		exp    id.Slice
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{id.NewSet(), id.NewSet(), nil},
		{nil, id.NewSet(1), id.Slice{1}},
		{id.NewSet(1), nil, id.Slice{1}},
		{id.NewSet(1), id.NewSet(), id.Slice{1}},
		{id.NewSet(1), id.NewSet(2), id.Slice{1, 2}},
		{id.NewSet(1), id.NewSet(1), id.Slice{1}},
	}
	for i, tc := range testcases {
		sl1 := tc.s1.Sorted()
		sl2 := tc.s2.Sorted()
		got := tc.s1.Copy(tc.s2).Sorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.Add(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

func TestSetSorted(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		set id.Set
		exp id.Slice
	}{
		{nil, nil},
		{id.NewSet(), nil},
		{id.NewSet(9, 4, 6, 1, 7), id.Slice{1, 4, 6, 7, 9}},
	}
	for i, tc := range testcases {
		got := tc.set.Sorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.Sorted() should be %v, but got %v", i, tc.set, tc.exp, got)
		}
	}
}

func TestSetIntersectOrSet(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 id.Set
		exp    id.Slice
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{nil, id.NewSet(), nil},
		{id.NewSet(), id.NewSet(), nil},
		{id.NewSet(1), nil, nil},
		{nil, id.NewSet(1), id.Slice{1}},
		{id.NewSet(1), id.NewSet(), nil},
		{id.NewSet(), id.NewSet(1), nil},
		{id.NewSet(1), id.NewSet(2), nil},
		{id.NewSet(2), id.NewSet(1), nil},
		{id.NewSet(1), id.NewSet(1), id.Slice{1}},
	}
	for i, tc := range testcases {
		sl1 := tc.s1.Sorted()
		sl2 := tc.s2.Sorted()
		got := tc.s1.IntersectOrSet(tc.s2).Sorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.IntersectOrSet(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

func TestSetRemove(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 id.Set
		exp    id.Slice
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{id.NewSet(), id.NewSet(), nil},
		{id.NewSet(1), nil, id.Slice{1}},
		{id.NewSet(1), id.NewSet(), id.Slice{1}},
		{id.NewSet(1), id.NewSet(2), id.Slice{1}},
		{id.NewSet(1), id.NewSet(1), id.Slice{}},
	}
	for i, tc := range testcases {
		sl1 := tc.s1.Sorted()
		sl2 := tc.s2.Sorted()
		newS1 := id.NewSet(sl1...)
		newS1.Substract(tc.s2)
		got := newS1.Sorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.Remove(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

//	func BenchmarkSet(b *testing.B) {
//		s := id.Set{}
//		for range b.N {
//			s[id.Zid(i)] = true
//		}
//	}
func BenchmarkSet(b *testing.B) {
	s := id.Set{}
	for i := range b.N {
		s[id.Zid(i)] = struct{}{}
	}
}

New version (lines 15-239):

import (
	"testing"

	"zettelstore.de/z/zettel/id"
)

func TestSetContainsOrNil(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s   *id.Set
		zid id.Zid
		exp bool
	}{
		{nil, id.Invalid, true},
		{nil, 14, true},
		{id.NewSet(), id.Invalid, false},
		{id.NewSet(), 1, false},
		{id.NewSet(), id.Invalid, false},
		{id.NewSet(1), 1, true},
	}
	for i, tc := range testcases {
		got := tc.s.ContainsOrNil(tc.zid)
		if got != tc.exp {
			t.Errorf("%d: %v.ContainsOrNil(%v) == %v, but got %v", i, tc.s, tc.zid, tc.exp, got)
		}
	}
}

func TestSetAdd(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 *id.Set
		exp    id.Slice
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{id.NewSet(), id.NewSet(), nil},
		{nil, id.NewSet(1), id.Slice{1}},
		{id.NewSet(1), nil, id.Slice{1}},
		{id.NewSet(1), id.NewSet(), id.Slice{1}},
		{id.NewSet(1), id.NewSet(2), id.Slice{1, 2}},
		{id.NewSet(1), id.NewSet(1), id.Slice{1}},
	}
	for i, tc := range testcases {
		sl1 := tc.s1.SafeSorted()
		sl2 := tc.s2.SafeSorted()
		got := tc.s1.IUnion(tc.s2).SafeSorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.Add(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

func TestSetSafeSorted(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		set *id.Set
		exp id.Slice
	}{
		{nil, nil},
		{id.NewSet(), nil},
		{id.NewSet(9, 4, 6, 1, 7), id.Slice{1, 4, 6, 7, 9}},
	}
	for i, tc := range testcases {
		got := tc.set.SafeSorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.SafeSorted() should be %v, but got %v", i, tc.set, tc.exp, got)
		}
	}
}

func TestSetIntersectOrSet(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 *id.Set
		exp    id.Slice
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{nil, id.NewSet(), nil},
		{id.NewSet(), id.NewSet(), nil},
		{id.NewSet(1), nil, nil},
		{nil, id.NewSet(1), id.Slice{1}},
		{id.NewSet(1), id.NewSet(), nil},
		{id.NewSet(), id.NewSet(1), nil},
		{id.NewSet(1), id.NewSet(2), nil},
		{id.NewSet(2), id.NewSet(1), nil},
		{id.NewSet(1), id.NewSet(1), id.Slice{1}},
	}
	for i, tc := range testcases {
		sl1 := tc.s1.SafeSorted()
		sl2 := tc.s2.SafeSorted()
		got := tc.s1.IntersectOrSet(tc.s2).SafeSorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.IntersectOrSet(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

func TestSetIUnion(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 *id.Set
		exp    *id.Set
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{nil, id.NewSet(), nil},
		{id.NewSet(), id.NewSet(), nil},
		{id.NewSet(1), nil, id.NewSet(1)},
		{nil, id.NewSet(1), id.NewSet(1)},
		{id.NewSet(1), id.NewSet(), id.NewSet(1)},
		{id.NewSet(), id.NewSet(1), id.NewSet(1)},
		{id.NewSet(1), id.NewSet(2), id.NewSet(1, 2)},
		{id.NewSet(2), id.NewSet(1), id.NewSet(2, 1)},
		{id.NewSet(1), id.NewSet(1), id.NewSet(1)},
		{id.NewSet(1, 2, 3), id.NewSet(2, 3, 4), id.NewSet(1, 2, 3, 4)},
	}
	for i, tc := range testcases {
		s1 := tc.s1.Clone()
		sl1 := s1.SafeSorted()
		sl2 := tc.s2.SafeSorted()
		got := s1.IUnion(tc.s2)
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.IUnion(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

func TestSetISubtract(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 *id.Set
		exp    id.Slice
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{nil, id.NewSet(), nil},
		{id.NewSet(), id.NewSet(), nil},
		{id.NewSet(1), nil, id.Slice{1}},
		{nil, id.NewSet(1), nil},
		{id.NewSet(1), id.NewSet(), id.Slice{1}},
		{id.NewSet(), id.NewSet(1), nil},
		{id.NewSet(1), id.NewSet(2), id.Slice{1}},
		{id.NewSet(2), id.NewSet(1), id.Slice{2}},
		{id.NewSet(1), id.NewSet(1), nil},
		{id.NewSet(1, 2, 3), id.NewSet(1), id.Slice{2, 3}},
		{id.NewSet(1, 2, 3), id.NewSet(2), id.Slice{1, 3}},
		{id.NewSet(1, 2, 3), id.NewSet(3), id.Slice{1, 2}},
		{id.NewSet(1, 2, 3), id.NewSet(1, 2), id.Slice{3}},
		{id.NewSet(1, 2, 3), id.NewSet(1, 3), id.Slice{2}},
		{id.NewSet(1, 2, 3), id.NewSet(2, 3), id.Slice{1}},
	}
	for i, tc := range testcases {
		s1 := tc.s1.Clone()
		sl1 := s1.SafeSorted()
		sl2 := tc.s2.SafeSorted()
		s1.ISubstract(tc.s2)
		got := s1.SafeSorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.ISubstract(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

func TestSetDiff(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		in1, in2   *id.Set
		exp1, exp2 *id.Set
	}{
		{nil, nil, nil, nil},
		{id.NewSet(1), nil, nil, id.NewSet(1)},
		{nil, id.NewSet(1), id.NewSet(1), nil},
		{id.NewSet(1), id.NewSet(1), nil, nil},
		{id.NewSet(1, 2), id.NewSet(1), nil, id.NewSet(2)},
		{id.NewSet(1), id.NewSet(1, 2), id.NewSet(2), nil},
		{id.NewSet(1, 2), id.NewSet(1, 3), id.NewSet(3), id.NewSet(2)},
		{id.NewSet(1, 2, 3), id.NewSet(2, 3, 4), id.NewSet(4), id.NewSet(1)},
		{id.NewSet(2, 3, 4), id.NewSet(1, 2, 3), id.NewSet(1), id.NewSet(4)},
	}
	for i, tc := range testcases {
		gotN, gotO := tc.in1.Diff(tc.in2)
		if !tc.exp1.Equal(gotN) {
			t.Errorf("%d: expected %v, but got: %v", i, tc.exp1, gotN)
		}
		if !tc.exp2.Equal(gotO) {
			t.Errorf("%d: expected %v, but got: %v", i, tc.exp2, gotO)
		}
	}
}

func TestSetRemove(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		s1, s2 *id.Set
		exp    id.Slice
	}{
		{nil, nil, nil},
		{id.NewSet(), nil, nil},
		{id.NewSet(), id.NewSet(), nil},
		{id.NewSet(1), nil, id.Slice{1}},
		{id.NewSet(1), id.NewSet(), id.Slice{1}},
		{id.NewSet(1), id.NewSet(2), id.Slice{1}},
		{id.NewSet(1), id.NewSet(1), id.Slice{}},
	}
	for i, tc := range testcases {
		sl1 := tc.s1.SafeSorted()
		sl2 := tc.s2.SafeSorted()
		newS1 := id.NewSet(sl1...)
		newS1.ISubstract(tc.s2)
		got := newS1.SafeSorted()
		if !got.Equal(tc.exp) {
			t.Errorf("%d: %v.Remove(%v) should be %v, but got %v", i, sl1, sl2, tc.exp, got)
		}
	}
}

func BenchmarkSet(b *testing.B) {
	s := id.NewSetCap(b.N)
	for i := range b.N {
		s.Add(id.Zid(i))
	}
}

Changes to zettel/id/slice.go.

Old version (lines 27-41):
// Clone a zettel identifier slice
func (zs Slice) Clone() Slice { return slices.Clone(zs) }

// Equal reports whether zs and other are the same length and contain the samle zettel
// identifier. A nil argument is equivalent to an empty slice.
func (zs Slice) Equal(other Slice) bool { return slices.Equal(zs, other) }


func (zs Slice) String() string {
	if len(zs) == 0 {
		return ""
	}
	var sb strings.Builder
	for i, zid := range zs {
		if i > 0 {
			sb.WriteByte(' ')

New version (lines 27-42):
// Clone a zettel identifier slice
func (zs Slice) Clone() Slice { return slices.Clone(zs) }

// Equal reports whether zs and other are the same length and contain the samle zettel
// identifier. A nil argument is equivalent to an empty slice.
func (zs Slice) Equal(other Slice) bool { return slices.Equal(zs, other) }

// MetaString returns the slice as a string to be store in metadata.
func (zs Slice) MetaString() string {
	if len(zs) == 0 {
		return ""
	}
	var sb strings.Builder
	for i, zid := range zs {
		if i > 0 {
			sb.WriteByte(' ')

Changes to zettel/id/slice_test.go.

Old version (lines 65-89):
		got = tc.s2.Equal(tc.s1)
		if got != tc.exp {
			t.Errorf("%d/%v.Equal(%v)==%v, but got %v", i, tc.s2, tc.s1, tc.exp, got)
		}
	}
}

func TestSliceString(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		in  id.Slice
		exp string
	}{
		{nil, ""},
		{id.Slice{}, ""},
		{id.Slice{1}, "00000000000001"},
		{id.Slice{1, 2}, "00000000000001 00000000000002"},
	}
	for i, tc := range testcases {
		got := tc.in.String()
		if got != tc.exp {
			t.Errorf("%d/%v: expected %q, but got %q", i, tc.in, tc.exp, got)
		}
	}
}

New version (lines 65-89):
		got = tc.s2.Equal(tc.s1)
		if got != tc.exp {
			t.Errorf("%d/%v.Equal(%v)==%v, but got %v", i, tc.s2, tc.s1, tc.exp, got)
		}
	}
}

func TestSliceMetaString(t *testing.T) {
	t.Parallel()
	testcases := []struct {
		in  id.Slice
		exp string
	}{
		{nil, ""},
		{id.Slice{}, ""},
		{id.Slice{1}, "00000000000001"},
		{id.Slice{1, 2}, "00000000000001 00000000000002"},
	}
	for i, tc := range testcases {
		got := tc.in.MetaString()
		if got != tc.exp {
			t.Errorf("%d/%v: expected %q, but got %q", i, tc.in, tc.exp, got)
		}
	}
}