|
@@ -1,595 +0,0 @@
|
1
|
|
-"Fanout cache automatically shards keys and values."
|
2
|
|
-
|
3
|
|
-import itertools as it
|
4
|
|
-import os.path as op
|
5
|
|
-import sqlite3
|
6
|
|
-import time
|
7
|
|
-
|
8
|
|
-from .core import ENOVAL, DEFAULT_SETTINGS, Cache, Disk, Timeout
|
9
|
|
-from .memo import memoize
|
10
|
|
-from .persistent import Deque, Index
|
11
|
|
-
|
12
|
|
-
|
13
|
|
class FanoutCache(object):
    "Cache that shards keys and values."
    def __init__(self, directory, shards=8, timeout=0.010, disk=Disk,
                 **settings):
        """Initialize cache instance.

        :param str directory: cache directory
        :param int shards: number of shards to distribute writes
        :param float timeout: SQLite connection timeout
        :param disk: `Disk` instance for serialization
        :param settings: any of `DEFAULT_SETTINGS`

        """
        self._directory = directory
        self._count = shards
        # The configured size limit applies to the whole cache, so each
        # shard receives an equal fraction of it.
        total_limit = settings.pop('size_limit',
                                   DEFAULT_SETTINGS['size_limit'])
        shard_settings = dict(settings)
        shard_settings['size_limit'] = total_limit / shards
        self._shards = tuple(
            Cache(
                op.join(directory, '%03d' % num),
                timeout=timeout,
                disk=disk,
                **shard_settings
            )
            for num in range(shards)
        )
        # All shards share one Disk type, so shard 0's hash is canonical.
        self._hash = self._shards[0].disk.hash
        self._deques = {}
        self._indexes = {}
|
43
|
|
-
|
44
|
|
-
|
45
|
|
- @property
|
46
|
|
- def directory(self):
|
47
|
|
- """Cache directory."""
|
48
|
|
- return self._directory
|
49
|
|
-
|
50
|
|
-
|
51
|
|
- def __getattr__(self, name):
|
52
|
|
- return getattr(self._shards[0], name)
|
53
|
|
-
|
54
|
|
-
|
55
|
|
- def set(self, key, value, expire=None, read=False, tag=None, retry=False):
|
56
|
|
- """Set `key` and `value` item in cache.
|
57
|
|
-
|
58
|
|
- When `read` is `True`, `value` should be a file-like object opened
|
59
|
|
- for reading in binary mode.
|
60
|
|
-
|
61
|
|
- If database timeout occurs then fails silently unless `retry` is set to
|
62
|
|
- `True` (default `False`).
|
63
|
|
-
|
64
|
|
- :param key: key for item
|
65
|
|
- :param value: value for item
|
66
|
|
- :param float expire: seconds until the key expires
|
67
|
|
- (default None, no expiry)
|
68
|
|
- :param bool read: read value as raw bytes from file (default False)
|
69
|
|
- :param str tag: text to associate with key (default None)
|
70
|
|
- :param bool retry: retry if database timeout expires (default False)
|
71
|
|
- :return: True if item was set
|
72
|
|
-
|
73
|
|
- """
|
74
|
|
- index = self._hash(key) % self._count
|
75
|
|
- set_func = self._shards[index].set
|
76
|
|
-
|
77
|
|
- while True:
|
78
|
|
- try:
|
79
|
|
- return set_func(key, value, expire, read, tag)
|
80
|
|
- except Timeout:
|
81
|
|
- if retry:
|
82
|
|
- continue
|
83
|
|
- else:
|
84
|
|
- return False
|
85
|
|
-
|
86
|
|
-
|
87
|
|
- def __setitem__(self, key, value):
|
88
|
|
- """Set `key` and `value` item in cache.
|
89
|
|
-
|
90
|
|
- Calls :func:`FanoutCache.set` internally with `retry` set to `True`.
|
91
|
|
-
|
92
|
|
- :param key: key for item
|
93
|
|
- :param value: value for item
|
94
|
|
-
|
95
|
|
- """
|
96
|
|
- self.set(key, value, retry=True)
|
97
|
|
-
|
98
|
|
-
|
99
|
|
- def add(self, key, value, expire=None, read=False, tag=None, retry=False):
|
100
|
|
- """Add `key` and `value` item to cache.
|
101
|
|
-
|
102
|
|
- Similar to `set`, but only add to cache if key not present.
|
103
|
|
-
|
104
|
|
- This operation is atomic. Only one concurrent add operation for given
|
105
|
|
- key from separate threads or processes will succeed.
|
106
|
|
-
|
107
|
|
- When `read` is `True`, `value` should be a file-like object opened
|
108
|
|
- for reading in binary mode.
|
109
|
|
-
|
110
|
|
- :param key: key for item
|
111
|
|
- :param value: value for item
|
112
|
|
- :param float expire: seconds until the key expires
|
113
|
|
- (default None, no expiry)
|
114
|
|
- :param bool read: read value as bytes from file (default False)
|
115
|
|
- :param str tag: text to associate with key (default None)
|
116
|
|
- :param bool retry: retry if database timeout expires (default False)
|
117
|
|
- :return: True if item was added
|
118
|
|
-
|
119
|
|
- """
|
120
|
|
- index = self._hash(key) % self._count
|
121
|
|
- add_func = self._shards[index].add
|
122
|
|
-
|
123
|
|
- while True:
|
124
|
|
- try:
|
125
|
|
- return add_func(key, value, expire, read, tag)
|
126
|
|
- except Timeout:
|
127
|
|
- if retry:
|
128
|
|
- continue
|
129
|
|
- else:
|
130
|
|
- return False
|
131
|
|
-
|
132
|
|
-
|
133
|
|
- def incr(self, key, delta=1, default=0, retry=False):
|
134
|
|
- """Increment value by delta for item with key.
|
135
|
|
-
|
136
|
|
- If key is missing and default is None then raise KeyError. Else if key
|
137
|
|
- is missing and default is not None then use default for value.
|
138
|
|
-
|
139
|
|
- Operation is atomic. All concurrent increment operations will be
|
140
|
|
- counted individually.
|
141
|
|
-
|
142
|
|
- Assumes value may be stored in a SQLite column. Most builds that target
|
143
|
|
- machines with 64-bit pointer widths will support 64-bit signed
|
144
|
|
- integers.
|
145
|
|
-
|
146
|
|
- :param key: key for item
|
147
|
|
- :param int delta: amount to increment (default 1)
|
148
|
|
- :param int default: value if key is missing (default 0)
|
149
|
|
- :param bool retry: retry if database timeout expires (default False)
|
150
|
|
- :return: new value for item on success else None
|
151
|
|
- :raises KeyError: if key is not found and default is None
|
152
|
|
-
|
153
|
|
- """
|
154
|
|
- index = self._hash(key) % self._count
|
155
|
|
- incr_func = self._shards[index].incr
|
156
|
|
-
|
157
|
|
- while True:
|
158
|
|
- try:
|
159
|
|
- return incr_func(key, delta, default)
|
160
|
|
- except Timeout:
|
161
|
|
- if retry:
|
162
|
|
- continue
|
163
|
|
- else:
|
164
|
|
- return None
|
165
|
|
-
|
166
|
|
-
|
167
|
|
- def decr(self, key, delta=1, default=0, retry=False):
|
168
|
|
- """Decrement value by delta for item with key.
|
169
|
|
-
|
170
|
|
- If key is missing and default is None then raise KeyError. Else if key
|
171
|
|
- is missing and default is not None then use default for value.
|
172
|
|
-
|
173
|
|
- Operation is atomic. All concurrent decrement operations will be
|
174
|
|
- counted individually.
|
175
|
|
-
|
176
|
|
- Unlike Memcached, negative values are supported. Value may be
|
177
|
|
- decremented below zero.
|
178
|
|
-
|
179
|
|
- Assumes value may be stored in a SQLite column. Most builds that target
|
180
|
|
- machines with 64-bit pointer widths will support 64-bit signed
|
181
|
|
- integers.
|
182
|
|
-
|
183
|
|
- :param key: key for item
|
184
|
|
- :param int delta: amount to decrement (default 1)
|
185
|
|
- :param int default: value if key is missing (default 0)
|
186
|
|
- :param bool retry: retry if database timeout expires (default False)
|
187
|
|
- :return: new value for item on success else None
|
188
|
|
- :raises KeyError: if key is not found and default is None
|
189
|
|
-
|
190
|
|
- """
|
191
|
|
- return self.incr(key, -delta, default, retry)
|
192
|
|
-
|
193
|
|
-
|
194
|
|
- def get(self, key, default=None, read=False, expire_time=False, tag=False,
|
195
|
|
- retry=False):
|
196
|
|
- """Retrieve value from cache. If `key` is missing, return `default`.
|
197
|
|
-
|
198
|
|
- If database timeout occurs then returns `default` unless `retry` is set
|
199
|
|
- to `True` (default `False`).
|
200
|
|
-
|
201
|
|
- :param key: key for item
|
202
|
|
- :param default: return value if key is missing (default None)
|
203
|
|
- :param bool read: if True, return file handle to value
|
204
|
|
- (default False)
|
205
|
|
- :param float expire_time: if True, return expire_time in tuple
|
206
|
|
- (default False)
|
207
|
|
- :param tag: if True, return tag in tuple (default False)
|
208
|
|
- :param bool retry: retry if database timeout expires (default False)
|
209
|
|
- :return: value for item if key is found else default
|
210
|
|
-
|
211
|
|
- """
|
212
|
|
- index = self._hash(key) % self._count
|
213
|
|
- get_func = self._shards[index].get
|
214
|
|
-
|
215
|
|
- while True:
|
216
|
|
- try:
|
217
|
|
- return get_func(
|
218
|
|
- key, default=default, read=read, expire_time=expire_time,
|
219
|
|
- tag=tag,
|
220
|
|
- )
|
221
|
|
- except (Timeout, sqlite3.OperationalError):
|
222
|
|
- if retry:
|
223
|
|
- continue
|
224
|
|
- else:
|
225
|
|
- return default
|
226
|
|
-
|
227
|
|
-
|
228
|
|
- def __getitem__(self, key):
|
229
|
|
- """Return corresponding value for `key` from cache.
|
230
|
|
-
|
231
|
|
- Calls :func:`FanoutCache.get` internally with `retry` set to `True`.
|
232
|
|
-
|
233
|
|
- :param key: key for item
|
234
|
|
- :return: value for item
|
235
|
|
- :raises KeyError: if key is not found
|
236
|
|
-
|
237
|
|
- """
|
238
|
|
- value = self.get(key, default=ENOVAL, retry=True)
|
239
|
|
-
|
240
|
|
- if value is ENOVAL:
|
241
|
|
- raise KeyError(key)
|
242
|
|
-
|
243
|
|
- return value
|
244
|
|
-
|
245
|
|
-
|
246
|
|
- def read(self, key):
|
247
|
|
- """Return file handle corresponding to `key` from cache.
|
248
|
|
-
|
249
|
|
- :param key: key for item
|
250
|
|
- :return: file open for reading in binary mode
|
251
|
|
- :raises KeyError: if key is not found
|
252
|
|
-
|
253
|
|
- """
|
254
|
|
- handle = self.get(key, default=ENOVAL, read=True, retry=True)
|
255
|
|
- if handle is ENOVAL:
|
256
|
|
- raise KeyError(key)
|
257
|
|
- return handle
|
258
|
|
-
|
259
|
|
-
|
260
|
|
- def __contains__(self, key):
|
261
|
|
- """Return `True` if `key` matching item is found in cache.
|
262
|
|
-
|
263
|
|
- :param key: key for item
|
264
|
|
- :return: True if key is found
|
265
|
|
-
|
266
|
|
- """
|
267
|
|
- index = self._hash(key) % self._count
|
268
|
|
- return key in self._shards[index]
|
269
|
|
-
|
270
|
|
-
|
271
|
|
- def pop(self, key, default=None, expire_time=False, tag=False,
|
272
|
|
- retry=False):
|
273
|
|
- """Remove corresponding item for `key` from cache and return value.
|
274
|
|
-
|
275
|
|
- If `key` is missing, return `default`.
|
276
|
|
-
|
277
|
|
- Operation is atomic. Concurrent operations will be serialized.
|
278
|
|
-
|
279
|
|
- :param key: key for item
|
280
|
|
- :param default: return value if key is missing (default None)
|
281
|
|
- :param float expire_time: if True, return expire_time in tuple
|
282
|
|
- (default False)
|
283
|
|
- :param tag: if True, return tag in tuple (default False)
|
284
|
|
- :param bool retry: retry if database timeout expires (default False)
|
285
|
|
- :return: value for item if key is found else default
|
286
|
|
-
|
287
|
|
- """
|
288
|
|
- index = self._hash(key) % self._count
|
289
|
|
- pop_func = self._shards[index].pop
|
290
|
|
-
|
291
|
|
- while True:
|
292
|
|
- try:
|
293
|
|
- return pop_func(
|
294
|
|
- key, default=default, expire_time=expire_time, tag=tag,
|
295
|
|
- )
|
296
|
|
- except Timeout:
|
297
|
|
- if retry:
|
298
|
|
- continue
|
299
|
|
- else:
|
300
|
|
- return default
|
301
|
|
-
|
302
|
|
-
|
303
|
|
- def delete(self, key, retry=False):
|
304
|
|
- """Delete corresponding item for `key` from cache.
|
305
|
|
-
|
306
|
|
- Missing keys are ignored.
|
307
|
|
-
|
308
|
|
- If database timeout occurs then fails silently unless `retry` is set to
|
309
|
|
- `True` (default `False`).
|
310
|
|
-
|
311
|
|
- :param key: key for item
|
312
|
|
- :param bool retry: retry if database timeout expires (default False)
|
313
|
|
- :return: True if item was deleted
|
314
|
|
-
|
315
|
|
- """
|
316
|
|
- index = self._hash(key) % self._count
|
317
|
|
- del_func = self._shards[index].__delitem__
|
318
|
|
-
|
319
|
|
- while True:
|
320
|
|
- try:
|
321
|
|
- return del_func(key)
|
322
|
|
- except Timeout:
|
323
|
|
- if retry:
|
324
|
|
- continue
|
325
|
|
- else:
|
326
|
|
- return False
|
327
|
|
- except KeyError:
|
328
|
|
- return False
|
329
|
|
-
|
330
|
|
-
|
331
|
|
- def __delitem__(self, key):
|
332
|
|
- """Delete corresponding item for `key` from cache.
|
333
|
|
-
|
334
|
|
- Calls :func:`FanoutCache.delete` internally with `retry` set to `True`.
|
335
|
|
-
|
336
|
|
- :param key: key for item
|
337
|
|
- :raises KeyError: if key is not found
|
338
|
|
-
|
339
|
|
- """
|
340
|
|
- deleted = self.delete(key, retry=True)
|
341
|
|
-
|
342
|
|
- if not deleted:
|
343
|
|
- raise KeyError(key)
|
344
|
|
-
|
345
|
|
-
|
346
|
|
- memoize = memoize
|
347
|
|
-
|
348
|
|
-
|
349
|
|
- def check(self, fix=False):
|
350
|
|
- """Check database and file system consistency.
|
351
|
|
-
|
352
|
|
- Intended for use in testing and post-mortem error analysis.
|
353
|
|
-
|
354
|
|
- While checking the cache table for consistency, a writer lock is held
|
355
|
|
- on the database. The lock blocks other cache clients from writing to
|
356
|
|
- the database. For caches with many file references, the lock may be
|
357
|
|
- held for a long time. For example, local benchmarking shows that a
|
358
|
|
- cache with 1,000 file references takes ~60ms to check.
|
359
|
|
-
|
360
|
|
- :param bool fix: correct inconsistencies
|
361
|
|
- :return: list of warnings
|
362
|
|
- :raises Timeout: if database timeout expires
|
363
|
|
-
|
364
|
|
- """
|
365
|
|
- return sum((shard.check(fix=fix) for shard in self._shards), [])
|
366
|
|
-
|
367
|
|
-
|
368
|
|
- def expire(self):
|
369
|
|
- """Remove expired items from cache.
|
370
|
|
-
|
371
|
|
- :return: count of items removed
|
372
|
|
-
|
373
|
|
- """
|
374
|
|
- return self._remove('expire', args=(time.time(),))
|
375
|
|
-
|
376
|
|
-
|
377
|
|
- def create_tag_index(self):
|
378
|
|
- """Create tag index on cache database.
|
379
|
|
-
|
380
|
|
- It is better to initialize cache with `tag_index=True` than use this.
|
381
|
|
-
|
382
|
|
- :raises Timeout: if database timeout expires
|
383
|
|
-
|
384
|
|
- """
|
385
|
|
- for shard in self._shards:
|
386
|
|
- shard.create_tag_index()
|
387
|
|
-
|
388
|
|
-
|
389
|
|
- def drop_tag_index(self):
|
390
|
|
- """Drop tag index on cache database.
|
391
|
|
-
|
392
|
|
- :raises Timeout: if database timeout expires
|
393
|
|
-
|
394
|
|
- """
|
395
|
|
- for shard in self._shards:
|
396
|
|
- shard.drop_tag_index()
|
397
|
|
-
|
398
|
|
-
|
399
|
|
- def evict(self, tag):
|
400
|
|
- """Remove items with matching `tag` from cache.
|
401
|
|
-
|
402
|
|
- :param str tag: tag identifying items
|
403
|
|
- :return: count of items removed
|
404
|
|
-
|
405
|
|
- """
|
406
|
|
- return self._remove('evict', args=(tag,))
|
407
|
|
-
|
408
|
|
-
|
409
|
|
- def cull(self):
|
410
|
|
- """Cull items from cache until volume is less than size limit.
|
411
|
|
-
|
412
|
|
- :return: count of items removed
|
413
|
|
-
|
414
|
|
- """
|
415
|
|
- return self._remove('cull')
|
416
|
|
-
|
417
|
|
-
|
418
|
|
- def clear(self):
|
419
|
|
- """Remove all items from cache.
|
420
|
|
-
|
421
|
|
- :return: count of items removed
|
422
|
|
-
|
423
|
|
- """
|
424
|
|
- return self._remove('clear')
|
425
|
|
-
|
426
|
|
-
|
427
|
|
- def _remove(self, name, args=()):
|
428
|
|
- total = 0
|
429
|
|
- for shard in self._shards:
|
430
|
|
- method = getattr(shard, name)
|
431
|
|
- while True:
|
432
|
|
- try:
|
433
|
|
- count = method(*args)
|
434
|
|
- total += count
|
435
|
|
- except Timeout as timeout:
|
436
|
|
- total += timeout.args[0]
|
437
|
|
- else:
|
438
|
|
- break
|
439
|
|
- return total
|
440
|
|
-
|
441
|
|
-
|
442
|
|
- def stats(self, enable=True, reset=False):
|
443
|
|
- """Return cache statistics hits and misses.
|
444
|
|
-
|
445
|
|
- :param bool enable: enable collecting statistics (default True)
|
446
|
|
- :param bool reset: reset hits and misses to 0 (default False)
|
447
|
|
- :return: (hits, misses)
|
448
|
|
-
|
449
|
|
- """
|
450
|
|
- results = [shard.stats(enable, reset) for shard in self._shards]
|
451
|
|
- return (sum(result[0] for result in results),
|
452
|
|
- sum(result[1] for result in results))
|
453
|
|
-
|
454
|
|
-
|
455
|
|
- def volume(self):
|
456
|
|
- """Return estimated total size of cache on disk.
|
457
|
|
-
|
458
|
|
- :return: size in bytes
|
459
|
|
-
|
460
|
|
- """
|
461
|
|
- return sum(shard.volume() for shard in self._shards)
|
462
|
|
-
|
463
|
|
-
|
464
|
|
- def close(self):
|
465
|
|
- "Close database connection."
|
466
|
|
- for shard in self._shards:
|
467
|
|
- shard.close()
|
468
|
|
- self._deques.clear()
|
469
|
|
- self._indexes.clear()
|
470
|
|
-
|
471
|
|
-
|
472
|
|
- def __enter__(self):
|
473
|
|
- return self
|
474
|
|
-
|
475
|
|
-
|
476
|
|
- def __exit__(self, *exception):
|
477
|
|
- self.close()
|
478
|
|
-
|
479
|
|
-
|
480
|
|
- def __getstate__(self):
|
481
|
|
- return (self._directory, self._count, self.timeout, type(self.disk))
|
482
|
|
-
|
483
|
|
-
|
484
|
|
- def __setstate__(self, state):
|
485
|
|
- self.__init__(*state)
|
486
|
|
-
|
487
|
|
-
|
488
|
|
- def __iter__(self):
|
489
|
|
- "Iterate keys in cache including expired items."
|
490
|
|
- iterators = [iter(shard) for shard in self._shards]
|
491
|
|
- return it.chain.from_iterable(iterators)
|
492
|
|
-
|
493
|
|
-
|
494
|
|
- def __reversed__(self):
|
495
|
|
- "Reverse iterate keys in cache including expired items."
|
496
|
|
- iterators = [reversed(shard) for shard in self._shards]
|
497
|
|
- return it.chain.from_iterable(reversed(iterators))
|
498
|
|
-
|
499
|
|
-
|
500
|
|
- def __len__(self):
|
501
|
|
- "Count of items in cache including expired items."
|
502
|
|
- return sum(len(shard) for shard in self._shards)
|
503
|
|
-
|
504
|
|
-
|
505
|
|
- def reset(self, key, value=ENOVAL):
|
506
|
|
- """Reset `key` and `value` item from Settings table.
|
507
|
|
-
|
508
|
|
- If `value` is not given, it is reloaded from the Settings
|
509
|
|
- table. Otherwise, the Settings table is updated.
|
510
|
|
-
|
511
|
|
- Settings attributes on cache objects are lazy-loaded and
|
512
|
|
- read-only. Use `reset` to update the value.
|
513
|
|
-
|
514
|
|
- Settings with the ``sqlite_`` prefix correspond to SQLite
|
515
|
|
- pragmas. Updating the value will execute the corresponding PRAGMA
|
516
|
|
- statement.
|
517
|
|
-
|
518
|
|
- :param str key: Settings key for item
|
519
|
|
- :param value: value for item (optional)
|
520
|
|
- :return: updated value for item
|
521
|
|
- :raises Timeout: if database timeout expires
|
522
|
|
-
|
523
|
|
- """
|
524
|
|
- for shard in self._shards:
|
525
|
|
- while True:
|
526
|
|
- try:
|
527
|
|
- result = shard.reset(key, value)
|
528
|
|
- except Timeout:
|
529
|
|
- pass
|
530
|
|
- else:
|
531
|
|
- break
|
532
|
|
- return result
|
533
|
|
-
|
534
|
|
-
|
535
|
|
- def deque(self, name):
|
536
|
|
- """Return Deque with given `name` in subdirectory.
|
537
|
|
-
|
538
|
|
- >>> cache = FanoutCache('/tmp/diskcache/fanoutcache')
|
539
|
|
- >>> deque = cache.deque('test')
|
540
|
|
- >>> deque.clear()
|
541
|
|
- >>> deque.extend('abc')
|
542
|
|
- >>> deque.popleft()
|
543
|
|
- 'a'
|
544
|
|
- >>> deque.pop()
|
545
|
|
- 'c'
|
546
|
|
- >>> len(deque)
|
547
|
|
- 1
|
548
|
|
-
|
549
|
|
- :param str name: subdirectory name for Deque
|
550
|
|
- :return: Deque with given name
|
551
|
|
-
|
552
|
|
- """
|
553
|
|
- _deques = self._deques
|
554
|
|
-
|
555
|
|
- try:
|
556
|
|
- return _deques[name]
|
557
|
|
- except KeyError:
|
558
|
|
- parts = name.split('/')
|
559
|
|
- directory = op.join(self._directory, 'deque', *parts)
|
560
|
|
- temp = Deque(directory=directory)
|
561
|
|
- _deques[name] = temp
|
562
|
|
- return temp
|
563
|
|
-
|
564
|
|
-
|
565
|
|
- def index(self, name):
|
566
|
|
- """Return Index with given `name` in subdirectory.
|
567
|
|
-
|
568
|
|
- >>> cache = FanoutCache('/tmp/diskcache/fanoutcache')
|
569
|
|
- >>> index = cache.index('test')
|
570
|
|
- >>> index.clear()
|
571
|
|
- >>> index['abc'] = 123
|
572
|
|
- >>> index['def'] = 456
|
573
|
|
- >>> index['ghi'] = 789
|
574
|
|
- >>> index.popitem()
|
575
|
|
- ('ghi', 789)
|
576
|
|
- >>> del index['abc']
|
577
|
|
- >>> len(index)
|
578
|
|
- 1
|
579
|
|
- >>> index['def']
|
580
|
|
- 456
|
581
|
|
-
|
582
|
|
- :param str name: subdirectory name for Index
|
583
|
|
- :return: Index with given name
|
584
|
|
-
|
585
|
|
- """
|
586
|
|
- _indexes = self._indexes
|
587
|
|
-
|
588
|
|
- try:
|
589
|
|
- return _indexes[name]
|
590
|
|
- except KeyError:
|
591
|
|
- parts = name.split('/')
|
592
|
|
- directory = op.join(self._directory, 'index', *parts)
|
593
|
|
- temp = Index(directory)
|
594
|
|
- _indexes[name] = temp
|
595
|
|
- return temp
|