path: root/worker/worker.go
Commit log (message, author, date, files changed, -deleted/+added lines):
* Register worker in init. (Reto Brunner, 2019-07-19, 1 file, -17/+7)
  This allows backends which can't always be compiled due to missing dependencies (say libnotmuch) to be compiled conditionally with build flags.
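
The build-tag pattern this enables looks roughly like the following; the registry, the Register helper, and the package layout are hypothetical illustrations rather than aerc's actual API.

//go:build notmuch

// A backend that needs libnotmuch is compiled only when the "notmuch" build
// tag is given (go build -tags notmuch) and registers itself from init().
package notmuch

// Hypothetical registry; in the real code this would live in a shared worker
// package so every backend can register into it.
var backends = make(map[string]func() (Backend, error))

// Backend is a stand-in for whatever interface the workers implement.
type Backend interface {
	Run()
}

// Register associates a URL scheme with a backend factory.
func Register(scheme string, factory func() (Backend, error)) {
	backends[scheme] = factory
}

func init() {
	// Runs only when this file is part of the build, i.e. with -tags notmuch.
	Register("notmuch", newNotmuchBackend)
}

func newNotmuchBackend() (Backend, error) {
	// ...construct the libnotmuch-backed worker here...
	return nil, nil
}
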
* maildir: Watch for new messages (Ben Burwell, 2019-07-17, 1 file, -1/+5)
  When a directory is opened, start watching its "new" subdirectory for incoming messages using the fsnotify library. When creation events are detected, run the Unseen routine to move the message from new to cur and add new UIDs to the store, updating the UI's list of directory contents as we go.
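
A minimal sketch of that watch loop, using fsnotify's standard Watcher API; handleNewMessage stands in for the Unseen/UID bookkeeping and is not aerc's real function.

// Package maildir: illustrative sketch of watching a maildir's "new"
// subdirectory for incoming deliveries.
package maildir

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func watchMaildir(dir string, handleNewMessage func(path string)) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	// New deliveries always land in the "new" subdirectory first.
	if err := watcher.Add(filepath.Join(dir, "new")); err != nil {
		watcher.Close()
		return err
	}
	go func() {
		defer watcher.Close()
		for {
			select {
			case ev, ok := <-watcher.Events:
				if !ok {
					return
				}
				if ev.Op&fsnotify.Create != 0 {
					handleNewMessage(ev.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("watch error:", err)
			}
		}
	}()
	return nil
}
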
* Add maildir backend worker (Ben Burwell, 2019-07-12, 1 file, -0/+3)
  Add the initial implementation of a backend for Maildir accounts. Much of the functionality required is implemented in the go-message and go-maildir libraries, so we use them as much as possible. The maildir worker hooks into a new maildir:// URL scheme in the accounts.conf file which points to a container of several maildir directories. From there, the OpenDirectory, FetchDirectoryContents, etc. messages work on subdirectories. This is implemented as a Container struct which handles mapping between the symbolic email folder names and UIDs to the concrete directories and file names.
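
A rough, stdlib-only sketch of that Container idea: the maildir:// target is a directory whose immediate subdirectories are the individual maildir folders. The real implementation builds on go-maildir and also tracks UID-to-filename mappings; only the folder-name-to-directory part is shown, and all names here are illustrative.

package maildir

import (
	"os"
	"path/filepath"
)

// Container points at a directory holding one maildir per folder.
type Container struct {
	Root string
}

// ListFolders returns the symbolic folder names, i.e. the names of the
// subdirectories that look like maildirs (they contain a "cur" directory).
func (c *Container) ListFolders() ([]string, error) {
	entries, err := os.ReadDir(c.Root)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		if _, err := os.Stat(filepath.Join(c.Root, e.Name(), "cur")); err == nil {
			names = append(names, e.Name())
		}
	}
	return names, nil
}

// Dir resolves a symbolic folder name to its concrete directory.
func (c *Container) Dir(folder string) string {
	return filepath.Join(c.Root, folder)
}
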
* Load IMAP worker for imap+insecure (Drew DeVault, 2019-05-20, 1 file, -1/+7)
* s/aerc2/aerc/g (Drew DeVault, 2019-05-17, 1 file, -2/+2)
* worker/types: fix Worker.Callbacks race condition (Simon Ser, 2019-04-27, 1 file, -6/+1)
  Worker.Process* functions were called in different goroutines than Worker.Post*. Protect the map with a mutex. Also make the map unexported to prevent external unprotected accesses.
  Worker.Process* functions used to delete items from the map. However, they didn't delete the element they retrieved: callbacks[msg.InResponseTo()] was read while callbacks[msg] was deleted. I'm not sure I understand why. I tried to delete the element that was accessed, but this broke everything (the UI froze at "Connecting..."). I don't believe any elements were actually removed from the map, so the new code just doesn't remove anything.
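
The shape of that fix is roughly the following; the types and method names are simplified from the commit description and do not reproduce aerc's worker/types package.

package worker

import "sync"

// WorkerMessage is a simplified stand-in for aerc's message interface.
type WorkerMessage interface {
	InResponseTo() WorkerMessage
}

// Worker keeps its callback map unexported so that every access goes
// through methods holding the mutex.
type Worker struct {
	mu        sync.Mutex
	callbacks map[WorkerMessage]func(WorkerMessage)
}

func New() *Worker {
	return &Worker{callbacks: make(map[WorkerMessage]func(WorkerMessage))}
}

// PostAction runs on the UI goroutine: it records the callback for the
// outgoing request before handing it off.
func (w *Worker) PostAction(msg WorkerMessage, cb func(WorkerMessage)) {
	if cb != nil {
		w.mu.Lock()
		w.callbacks[msg] = cb
		w.mu.Unlock()
	}
	// ...send msg to the worker goroutine...
}

// ProcessMessage runs on the other goroutine when a response arrives: it
// looks up the callback for the original request under the same mutex.
func (w *Worker) ProcessMessage(msg WorkerMessage) {
	w.mu.Lock()
	cb, ok := w.callbacks[msg.InResponseTo()]
	w.mu.Unlock()
	if ok {
		cb(msg)
	}
}
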
* Apply gofmt (Drew DeVault, 2018-06-12, 1 file, -1/+2)
* fallthrough in worker selection (Markus Ongyerth, 2018-06-12, 1 file, -1/+1)
* Reduce boilerplate in worker/UI (Drew DeVault, 2018-02-01, 1 file, -9/+11)
* Improve logging (Drew DeVault, 2018-01-31, 1 file, -2/+3)
* Move worker into account tab (Drew DeVault, 2018-01-11, 1 file, -1/+1)
* Misc idiomatic fixes (emersion, 2018-01-10, 1 file, -5/+2)
* Parse account configuration (Drew DeVault, 2018-01-09, 1 file, -3/+17)
* Initial pass on worker/UI message passing (Drew DeVault, 2018-01-09, 1 file, -0/+18)

path: root/lib/msgstore.go (blame view, tag 0.5.0, commit 27b25174)

package lib

import (
	"io"
	"time"

	"github.com/emersion/go-imap"

	"git.sr.ht/~sircmpwn/aerc2/worker/types"
)

// MessageStore tracks the state of a single directory: the messages and
// UIDs known to it, pending fetches, and local deletions. It forwards fetch
// and delete requests to the backend worker.
type MessageStore struct {
	Deleted  map[uint32]interface{}
	DirInfo  types.DirectoryInfo
	Messages map[uint32]*types.MessageInfo
	// Ordered list of known UIDs
	Uids []uint32

	bodyCallbacks   map[uint32][]func(io.Reader)
	headerCallbacks map[uint32][]func(*types.MessageInfo)

	onUpdate func(store *MessageStore) // TODO: multiple onUpdate handlers

	// Map of uids we've asked the worker to fetch
	pendingBodies  map[uint32]interface{}
	pendingHeaders map[uint32]interface{}
	worker         *types.Worker
}

// NewMessageStore returns an empty MessageStore bound to the given worker
// and directory.
func NewMessageStore(worker *types.Worker,
	dirInfo *types.DirectoryInfo) *MessageStore {

	return &MessageStore{
		Deleted: make(map[uint32]interface{}),
		DirInfo: *dirInfo,

		bodyCallbacks:   make(map[uint32][]func(io.Reader)),
		headerCallbacks: make(map[uint32][]func(*types.MessageInfo)),

		pendingBodies:  make(map[uint32]interface{}),
		pendingHeaders: make(map[uint32]interface{}),
		worker:         worker,
	}
}

// FetchHeaders asks the worker for the headers of the given uids, skipping
// any that are already pending, and registers cb to be called as each
// result arrives.
func (store *MessageStore) FetchHeaders(uids []uint32,
	cb func(*types.MessageInfo)) {

	// TODO: this could be optimized by pre-allocating toFetch and trimming it
	// at the end. In practice we expect to get most messages back in one frame.
	var toFetch imap.SeqSet
	for _, uid := range uids {
		if _, ok := store.pendingHeaders[uid]; !ok {
			toFetch.AddNum(uid)
			store.pendingHeaders[uid] = nil
			if cb != nil {
				if list, ok := store.headerCallbacks[uid]; ok {
					store.headerCallbacks[uid] = append(list, cb)
				} else {
					store.headerCallbacks[uid] = []func(*types.MessageInfo){cb}
				}
			}
		}
	}
	if !toFetch.Empty() {
		store.worker.PostAction(&types.FetchMessageHeaders{Uids: toFetch}, nil)
	}
}

// FetchFull asks the worker for the full bodies of the given uids, skipping
// any that are already pending, and registers cb to be called as each
// result arrives.
func (store *MessageStore) FetchFull(uids []uint32, cb func(io.Reader)) {
	// TODO: this could be optimized by pre-allocating toFetch and trimming it
	// at the end. In practice we expect to get most messages back in one frame.
	var toFetch imap.SeqSet
	for _, uid := range uids {
		if _, ok := store.pendingBodies[uid]; !ok {
			toFetch.AddNum(uid)
			store.pendingBodies[uid] = nil
			if cb != nil {
				if list, ok := store.bodyCallbacks[uid]; ok {
					store.bodyCallbacks[uid] = append(list, cb)
				} else {
					store.bodyCallbacks[uid] = []func(io.Reader){cb}
				}
			}
		}
	}
	if !toFetch.Empty() {
		store.worker.PostAction(&types.FetchFullMessages{Uids: toFetch}, nil)
	}
}

// FetchBodyPart asks the worker for a single body part of a message and
// calls cb with its reader once the response arrives.
func (store *MessageStore) FetchBodyPart(
	uid uint32, part int, cb func(io.Reader)) {

	store.worker.PostAction(&types.FetchMessageBodyPart{
		Uid:  uid,
		Part: part,
	}, func(resp types.WorkerMessage) {
		msg, ok := resp.(*types.MessageBodyPart)
		if !ok {
			return
		}
		cb(msg.Reader)
	})
}

// merge copies every field that is set on from into to.
func (store *MessageStore) merge(
	to *types.MessageInfo, from *types.MessageInfo) {

	if from.BodyStructure != nil {
		to.BodyStructure = from.BodyStructure
	}
	if from.Envelope != nil {
		to.Envelope = from.Envelope
	}
	if len(from.Flags) != 0 {
		to.Flags = from.Flags
	}
	if from.Size != 0 {
		to.Size = from.Size
	}
	var zero time.Time
	if from.InternalDate != zero {
		to.InternalDate = from.InternalDate
	}
}

// Update applies a message from the worker to the store's state and, if
// anything changed, notifies the registered onUpdate handler.
func (store *MessageStore) Update(msg types.WorkerMessage) {
	update := false
	switch msg := msg.(type) {
	case *types.DirectoryInfo:
		store.DirInfo = *msg
		update = true
	case *types.DirectoryContents:
		newMap := make(map[uint32]*types.MessageInfo)
		for _, uid := range msg.Uids {
			if info, ok := store.Messages[uid]; ok {
				newMap[uid] = info
			} else {
				newMap[uid] = nil
			}
		}
		store.Messages = newMap
		store.Uids = msg.Uids
		update = true
	case *types.MessageInfo:
		if existing, ok := store.Messages[msg.Uid]; ok && existing != nil {
			store.merge(existing, msg)
		} else {
			store.Messages[msg.Uid] = msg
		}
		if _, ok := store.pendingHeaders[msg.Uid]; msg.Envelope != nil && ok {
			delete(store.pendingHeaders, msg.Uid)
			if cbs, ok := store.headerCallbacks[msg.Uid]; ok {
				for _, cb := range cbs {
					cb(msg)
				}
			}
		}
		update = true
	case *types.MessageBody:
		if _, ok := store.pendingBodies[msg.Uid]; ok {
			delete(store.pendingBodies, msg.Uid)
			if cbs, ok := store.bodyCallbacks[msg.Uid]; ok {
				for _, cb := range cbs {
					cb(msg.Reader)
				}
			}
		}
	case *types.MessagesDeleted:
		toDelete := make(map[uint32]interface{})
		for _, uid := range msg.Uids {
			toDelete[uid] = nil
			delete(store.Messages, uid)
			delete(store.Deleted, uid)
		}
		// Rebuild the ordered uid list without the deleted messages.
		uids := make([]uint32, 0, len(store.Uids))
		for _, uid := range store.Uids {
			if _, deleted := toDelete[uid]; !deleted {
				uids = append(uids, uid)
			}
		}
		store.Uids = uids
		update = true
	}
	if update {
		store.update()
	}
}

// OnUpdate registers the handler called whenever the store's contents
// change.
func (store *MessageStore) OnUpdate(fn func(store *MessageStore)) {
	store.onUpdate = fn
}

func (store *MessageStore) update() {
	if store.onUpdate != nil {
		store.onUpdate(store)
	}
}

// Delete marks the given uids as deleted locally and asks the worker to
// delete the messages on the backend.
func (store *MessageStore) Delete(uids []uint32) {
	var set imap.SeqSet
	for _, uid := range uids {
		set.AddNum(uid)
		store.Deleted[uid] = nil
	}
	store.worker.PostAction(&types.DeleteMessages{Uids: set}, nil)
	store.update()
}
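
For context, a brief usage sketch of the store above, assuming a *types.Worker that is already running and a *types.DirectoryInfo received from it; exampleUsage itself is hypothetical and only shows how the pieces fit together.

package lib

import (
	"log"

	"git.sr.ht/~sircmpwn/aerc2/worker/types"
)

// exampleUsage wires a MessageStore to an existing worker and directory.
func exampleUsage(worker *types.Worker, dirInfo *types.DirectoryInfo) *MessageStore {
	store := NewMessageStore(worker, dirInfo)

	// React whenever the store's contents change (aerc's UI re-renders here).
	store.OnUpdate(func(s *MessageStore) {
		log.Printf("%d messages known", len(s.Uids))
	})

	// Worker responses must be routed back into the store, e.g. from the
	// loop that drains messages from the worker:
	//
	//	store.Update(msg)
	//
	// Once a DirectoryContents message has populated store.Uids, headers
	// can be requested; uids that are already pending are skipped.
	store.FetchHeaders(store.Uids, func(info *types.MessageInfo) {
		log.Println("fetched header for uid", info.Uid)
	})
	return store
}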