1   /**
2    *  Copyright 2003-2006 Greg Luck
3    *
4    *  Licensed under the Apache License, Version 2.0 (the "License");
5    *  you may not use this file except in compliance with the License.
6    *  You may obtain a copy of the License at
7    *
8    *      http://www.apache.org/licenses/LICENSE-2.0
9    *
10   *  Unless required by applicable law or agreed to in writing, software
11   *  distributed under the License is distributed on an "AS IS" BASIS,
12   *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   *  See the License for the specific language governing permissions and
14   *  limitations under the License.
15   */
16  
17  
18  package net.sf.ehcache.store;
19  
20  import net.sf.ehcache.Cache;
21  import net.sf.ehcache.CacheException;
22  import net.sf.ehcache.Element;
23  import net.sf.ehcache.Status;
24  import net.sf.ehcache.event.RegisteredEventListeners;
25  import org.apache.commons.logging.Log;
26  import org.apache.commons.logging.LogFactory;
27  
28  import java.io.ByteArrayInputStream;
29  import java.io.ByteArrayOutputStream;
30  import java.io.File;
31  import java.io.FileInputStream;
32  import java.io.FileOutputStream;
33  import java.io.IOException;
34  import java.io.ObjectInputStream;
35  import java.io.ObjectOutputStream;
36  import java.io.ObjectStreamClass;
37  import java.io.RandomAccessFile;
38  import java.io.Serializable;
39  import java.io.StreamCorruptedException;
40  import java.util.ArrayList;
41  import java.util.Collections;
42  import java.util.HashMap;
43  import java.util.HashSet;
44  import java.util.Iterator;
45  import java.util.List;
46  import java.util.Map;
47  import java.util.Set;
48  
49  /**
50   * A disk store implementation.
51   * <p/>
52   * As of ehcache-1.2 (v1.41 of this file) DiskStore uses a mix of finer-grained locking on synchronized collections
53   * and synchronization on the whole instance, which was the sole locking approach in earlier versions.
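     * <p/>
     * A DiskStore is normally created internally by a {@link net.sf.ehcache.Cache} configured to overflow
     * to disk, rather than constructed directly. The sketch below is illustrative only: it assumes an
     * already initialised <code>cache</code> and a writable <code>java.io.tmpdir</code>, and simply
     * exercises the public store operations defined in this class.
     * <pre>
     * DiskStore diskStore = new DiskStore(cache, System.getProperty("java.io.tmpdir"));
     * diskStore.put(new Element("key1", "value1"));
     * Element element = diskStore.get("key1");
     * diskStore.remove("key1");
     * diskStore.dispose();
     * </pre>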
54   *
55   * @author Adam Murdoch
56   * @author Greg Luck
57   * @author patches contributed: Ben Houston
58   * @version $Id: DiskStore.java 155 2006-07-15 08:23:12Z gregluck $
59   */
60  public class DiskStore implements Store {
61  
62      /**
63       * If the CacheManager needs to resolve a conflict with the disk path, it will create a
64       * subdirectory in the given disk path with this prefix followed by a number. The presence of this
65      * name is used to determine whether it makes sense for a persistent DiskStore to be loaded. Loading
66       * persistent DiskStores will only have useful semantics where the diskStore path has not changed.
67       */
68      public static final String AUTO_DISK_PATH_DIRECTORY_PREFIX = "ehcache_auto_created";
69  
70      private static final Log LOG = LogFactory.getLog(DiskStore.class.getName());
71      private static final int MS_PER_SECOND = 1000;
72      private static final int SPOOL_THREAD_INTERVAL = 200;
73      private static final int ESTIMATED_MINIMUM_PAYLOAD_SIZE = 512;
74      private long expiryThreadInterval;
75  
76      private final String name;
77      private boolean active;
78      private RandomAccessFile randomAccessFile;
79  
80      private Map diskElements = Collections.synchronizedMap(new HashMap());
81      private List freeSpace = Collections.synchronizedList(new ArrayList());
82      private final Map spool = Collections.synchronizedMap(new HashMap());
83  
84  
85      private Thread spoolThread;
86      private Thread expiryThread;
87  
88      private Cache cache;
89  
90      /**
91       * If persistent, the disk file will be kept
92       * and reused on next startup. In addition the
93       * memory store will flush all contents to spool,
94       * and spool will flush all to disk.
95       */
96      private final boolean persistent;
97  
98      private final String diskPath;
99  
100     private File dataFile;
101 
102     /**
103      * Used to persist elements
104      */
105     private File indexFile;
106 
107     private Status status;
108 
109     /**
110      * The size in bytes of the disk elements
111      */
112     private long totalSize;
113 
114     /**
115      * Creates a disk store.
116      *
117      * @param cache    the {@link net.sf.ehcache.Cache} that the store is part of
118      * @param diskPath the directory in which to create data and index files
119      */
120     public DiskStore(Cache cache, String diskPath) {
121         status = Status.STATUS_UNINITIALISED;
122         this.cache = cache;
123         name = cache.getName();
124         this.diskPath = diskPath;
125         this.expiryThreadInterval = cache.getDiskExpiryThreadIntervalSeconds();
126         this.persistent = cache.isDiskPersistent();
127 
128         try {
129             initialiseFiles();
130 
131             active = true;
132 
133             // Always start up the spool thread
134             spoolThread = new SpoolThread();
135             spoolThread.start();
136 
137             // Start up the expiry thread if not eternal
138             if (!cache.isEternal()) {
139                 expiryThread = new ExpiryThread();
140                 expiryThread.start();
141             }
142 
143             status = Status.STATUS_ALIVE;
144         } catch (final Exception e) {
145             // Cleanup on error
146             dispose();
147             LOG.error(name + "Cache: Could not create disk store. Initial cause was " + e.getMessage(), e);
148         }
149     }
150 
151 
152     private void initialiseFiles() throws Exception {
153         // Make sure the cache directory exists
154         final File diskDir = new File(diskPath);
155         if (diskDir.exists() && !diskDir.isDirectory()) {
156             throw new Exception("Store directory \"" + diskDir.getCanonicalPath() + "\" exists and is not a directory.");
157         }
158         if (!diskDir.exists() && !diskDir.mkdirs()) {
159             throw new Exception("Could not create cache directory \"" + diskDir.getCanonicalPath() + "\".");
160         }
161 
162         dataFile = new File(diskDir, getDataFileName());
163         indexFile = new File(diskDir, getIndexFileName());
164 
165         deleteIndexIfNoData();
166 
167         if (persistent) {
168             if (!readIndex()) {
169                 LOG.debug("Index file dirty or empty. Deleting data file " + getDataFileName());
170                 dataFile.delete();
171             }
172         } else {
173             LOG.debug("Deleting data file " + getDataFileName());
174             dataFile.delete();
175             indexFile = null;
176         }
177 
178         // Open the data file as random access. The dataFile is created if necessary.
179         randomAccessFile = new RandomAccessFile(dataFile, "rw");
180     }
181 
182     private void deleteIndexIfNoData() {
183         boolean dataFileExists = dataFile.exists();
184         boolean indexFileExists = indexFile.exists();
185         if (!dataFileExists && indexFileExists) {
186             LOG.debug("Matching data file missing for index file. Deleting index file " + getIndexFileName());
187             indexFile.delete();
188         }
189     }
190 
191     /**
192      * Asserts that the store is active.
193      */
194     private void checkActive() throws CacheException {
195         if (!active) {
196             throw new CacheException(name + " Cache: The Disk store is not active.");
197         }
198     }
199 
200     /**
201      * Gets an {@link Element} from the Disk Store.
202      *
203      * @return The element
204      */
205     public final synchronized Element get(final Object key) {
206         try {
207             checkActive();
208 
209             // Check in the spool.  Remove if present
210             Element element;
211             synchronized (spool) {
212                 element = (Element) spool.remove(key);
213             }
214             if (element != null) {
215                 element.updateAccessStatistics();
216                 return element;
217             }
218 
219             // Check if the element is on disk
220             final DiskElement diskElement = (DiskElement) diskElements.get(key);
221             if (diskElement == null) {
222                 // Not on disk
223                 return null;
224             }
225 
226             element = loadElementFromDiskElement(diskElement);
227             element.updateAccessStatistics();
228             return element;
229         } catch (Exception exception) {
230             LOG.error(name + "Cache: Could not read disk store element for key " + key + ". Error was "
231                     + exception.getMessage(), exception);
232         }
233         return null;
234     }
235 
236     /**
237      * An unsynchronized and very low cost check to see if a key is in the Store. No check is made to see if the Element is expired.
238      *
239      * @param key The Element key
240      * @return true if found. If this method returns false, it means that an Element with the given key is definitely not in the DiskStore.
241      *         If it returns true, there is an Element there. An attempt to get it may return null if the Element has expired.
242      */
243     public final boolean containsKey(Object key) {
244         return diskElements.containsKey(key) || spool.containsKey(key);
245     }
246 
247     private Element loadElementFromDiskElement(DiskElement diskElement) throws IOException, ClassNotFoundException {
248         Element element;
249         // Load the element
250         randomAccessFile.seek(diskElement.position);
251         final byte[] buffer = new byte[diskElement.payloadSize];
252         randomAccessFile.readFully(buffer);
253         final ByteArrayInputStream instr = new ByteArrayInputStream(buffer);
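            // Resolve classes against the thread context classloader so that Elements whose classes
            // were loaded by an application or container classloader can still be deserialized.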
254         final ObjectInputStream objstr = new ObjectInputStream(instr) {
255             protected Class resolveClass(ObjectStreamClass clazz) throws ClassNotFoundException {
256                 ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
257                 return Class.forName(clazz.getName(), false, classLoader);
258             }
259         };
260         element = (Element) objstr.readObject();
261         objstr.close();
262         return element;
263     }
264 
265     /**
266      * Gets an {@link Element} from the Disk Store, without updating statistics
267      *
268      * @return The element
269      */
270     public final synchronized Element getQuiet(final Object key) {
271         try {
272             checkActive();
273 
274             // Check in the spool.  Remove if present
275             Element element;
276             synchronized (spool) {
277                 element = (Element) spool.remove(key);
278             }
279             if (element != null) {
280                 //element.updateAccessStatistics(); Don't update statistics
281                 return element;
282             }
283 
284             // Check if the element is on disk
285             final DiskElement diskElement = (DiskElement) diskElements.get(key);
286             if (diskElement == null) {
287                 // Not on disk
288                 return null;
289             }
290 
291             element = loadElementFromDiskElement(diskElement);
292             //element.updateAccessStatistics(); Don't update statistics
293             return element;
294         } catch (Exception e) {
295             LOG.error(name + "Cache: Could not read disk store element for key " + key
296                     + ". Initial cause was " + e.getMessage(), e);
297         }
298         return null;
299     }
300 
301 
302     /**
303      * Gets an Array of the keys for all elements in the disk store.
304      *
305      * @return An Object[] of {@link Serializable} keys
306      * @noinspection SynchronizeOnNonFinalField
307      */
308     public final synchronized Object[] getKeyArray() {
309         Set elementKeySet;
310         synchronized (diskElements) {
311             elementKeySet = diskElements.keySet();
312         }
313         Set spoolKeySet;
314         synchronized (spool) {
315             spoolKeySet = spool.keySet();
316         }
317         Set allKeysSet = new HashSet(elementKeySet.size() + spoolKeySet.size());
318         allKeysSet.addAll(elementKeySet);
319         allKeysSet.addAll(spoolKeySet);
320         return allKeysSet.toArray();
321     }
322 
323     /**
324      * Returns the current store size.
325      *
326      * @noinspection SynchronizeOnNonFinalField
327      */
328     public final synchronized int getSize() {
329         try {
330             checkActive();
331             int spoolSize;
332             synchronized (spool) {
333                 spoolSize = spool.size();
334             }
335             int diskSize;
336             synchronized (diskElements) {
337                 diskSize = diskElements.size();
338             }
339             return spoolSize + diskSize;
340         } catch (Exception e) {
341             LOG.error(name + "Cache: Could not determine size of disk store. Initial cause was " + e.getMessage(), e);
342             return 0;
343         }
344     }
345 
346     /**
347      * Returns the store status.
348      */
349     public final Status getStatus() {
350         return status;
351     }
352 
353     /**
354      * Puts an element into the disk store.
355      * <p/>
356      * This method is not synchronized. It is however threadsafe. It uses fine-grained
357      * synchronization on the spool.
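         * <p/>
         * The element is only queued in the in-memory spool here; the spool thread later writes it
         * to the data file via <code>flushSpool()</code>.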
358      */
359     public final void put(final Element element) {
360         try {
361             checkActive();
362 
363             // Spool the element
364             if (spoolThread.isAlive()) {
365                 synchronized (spool) {
366                     spool.put(element.getObjectKey(), element);
367                 }
368             } else {
369                 LOG.error(name + "Cache: Elements cannot be written to disk store because the" +
370                         " spool thread has died.");
371                 synchronized (spool) {
372                     spool.clear();
373                 }
374             }
375 
376         } catch (Exception e) {
377             LOG.error(name + "Cache: Could not write disk store element for " + element.getObjectKey()
378                     + ". Initial cause was " + e.getMessage(), e);
379         }
380     }
381 
382     /**
383      * Removes an item from the disk store.
384      *
385      * @noinspection SynchronizeOnNonFinalField
386      */
387     public final synchronized Element remove(final Object key) {
388         Element element;
389         try {
390             checkActive();
391 
392             // Remove the entry from the spool
393             synchronized (spool) {
394                 element = (Element) spool.remove(key);
395             }
396 
397             // Remove the entry from the file. Could be in both places.
398             synchronized (diskElements) {
399                 final DiskElement diskElement = (DiskElement) diskElements.remove(key);
400                 if (diskElement != null) {
401                     element = loadElementFromDiskElement(diskElement);
402                     freeBlock(diskElement);
403                 }
404             }
405         } catch (Exception exception) {
406             String message = name + "Cache: Could not remove disk store entry for " + key
407                     + ". Error was " + exception.getMessage();
408             LOG.error(message, exception);
409             throw new CacheException(message);
410         }
411         return element;
412     }
413 
414     /**
415      * Marks a block as free.
416      */
417     private void freeBlock(final DiskElement element) {
418         totalSize -= element.payloadSize;
419         element.payloadSize = 0;
420         freeSpace.add(element);
421     }
422 
423     /**
424      * Remove all of the elements from the store.
425      * <p/>
426      * If there are registered <code>CacheEventListener</code>s they are notified of the expiry or removal
427      * of the <code>Element</code> as each is removed.
428      */
429     public final synchronized void removeAll() {
430         try {
431             checkActive();
432 
433             notifyingRemoveAll();
434 
435             // Ditch all the elements, and truncate the file
436             spool.clear();
437             diskElements.clear();
438             freeSpace.clear();
439             totalSize = 0;
440             randomAccessFile.setLength(0);
441             if (persistent) {
442                 indexFile.delete();
443                 indexFile.createNewFile();
444             }
445         } catch (Exception e) {
446             // Clean up
447             LOG.error(name + " Cache: Could not rebuild disk store. Initial cause was " + e.getMessage(), e);
448             dispose();
449         }
450     }
451 
452     private void notifyingRemoveAll() {
453         RegisteredEventListeners listeners = cache.getCacheEventNotificationService();
454         if (!listeners.getCacheEventListeners().isEmpty()) {
455             Object[] keys = getKeyArray();
456             for (int i = 0; i < keys.length; i++) {
457                 Serializable key = (Serializable) keys[i];
458                 Element element = remove(key);
459                 if (cache.isExpired(element)) {
460                     listeners.notifyElementExpiry(element, false);
461                 } else {
462                     listeners.notifyElementRemoved(element, false);
463                 }
464             }
465         }
466     }
467 
468     /**
469      * Shuts down the disk store in preparation for cache shutdown
470      * <p/>
471      * If a VM crash happens, the shutdown hook will not run. The data file and the index file
472      * will be out of synchronisation. At initialisation we always delete the index file
473      * after we have read the elements, so that it has a zero length. On a dirty restart, it will still have a zero length
474      * and the data file will automatically be deleted, thus preserving safety.
475      */
476     public final synchronized void dispose() {
477 
478         if (!active) {
479             return;
480         }
481 
482         // Close the cache
483         try {
484             if (expiryThread != null) {
485                 expiryThread.interrupt();
486             }
487 
488             flush();
489 
490             //Clear in-memory data structures
491             spool.clear();
492             diskElements.clear();
493             freeSpace.clear();
494             if (randomAccessFile != null) {
495                 randomAccessFile.close();
496             }
497             if (!persistent) {
498                 LOG.debug("Deleting file " + dataFile.getName());
499                 dataFile.delete();
500             }
501         } catch (Exception e) {
502             LOG.error(name + "Cache: Could not shut down disk cache. Initial cause was " + e.getMessage(), e);
503         } finally {
504             active = false;
505             randomAccessFile = null;
506             notifyAll();
507 
508             //release reference to cache
509             cache = null;
510         }
511     }
512 
513     /**
514      * Flush the spool if persistent, so we don't lose any data.
515      *
516      * @throws IOException
517      */
518     public final void flush() throws IOException {
519         if (persistent) {
520             flushSpool();
521             writeIndex();
522         }
523     }
524 
525     /**
526      * Whether the spool is empty, i.e. whether there are no elements waiting to be written to disk.
527      *
528      * @return false if there are elements waiting, otherwise true
529      */
530     public final synchronized boolean isSpoolEmpty() {
531         return (!active || spool.size() == 0);
532     }
533 
534     /**
535      * Main method for the spool thread.
536      * <p/>
537      * Note that the spool thread locks the cache for the entire time it is writing elements to the disk.
538      */
539     private void spoolThreadMain() {
540         while (true) {
541             // Wait for elements in the spool
542             while (active && spool != null && spool.size() == 0) {
543                 try {
544                     Thread.sleep(SPOOL_THREAD_INTERVAL);
545                 } catch (InterruptedException e) {
546                     LOG.debug("Spool Thread interrupted.");
547                     return;
548                 }
549             }
550 
551 
552             if (!active) {
553                 return;
554             }
555 
556 
557             if (spool != null && spool.size() != 0) {
558                 // Write elements to disk
559                 try {
560                     flushSpool();
561                 } catch (Throwable e) {
562                     LOG.error(name + "Cache: Could not flush elements to disk due to " + e.getMessage() + ". Continuing...", e);
563                 }
564             }
565         }
566     }
567 
568     /**
569      * Flushes all spooled elements to disk.
570      * Note that the cache is locked for the entire time that the spool is being flushed.
571      *
572      * @noinspection SynchronizeOnNonFinalField
573      */
574     private synchronized void flushSpool() throws IOException {
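            // Take a snapshot of the spool and clear it under the spool lock, so that new puts can
            // continue to queue while the copied elements are written to disk below.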
575         Object[] spoolCopy = createCopyOfSpool();
576 
577         for (int i = 0; i < spoolCopy.length; i++) {
578             writeOrReplaceEntry(spoolCopy[i]);
579             spoolCopy[i] = null;
580 
581         }
582     }
583 
584     private Object[] createCopyOfSpool() {
585         Object[] spoolCopy;
586         synchronized (spool) {
587             spoolCopy = spool.values().toArray();
588             spool.clear();
589         }
590         return spoolCopy;
591     }
592 
593     private void writeOrReplaceEntry(Object object) throws IOException {
594         Element element = (Element) object;
595         if (element == null) {
596             return;
597         }
598         final Serializable key = (Serializable) element.getObjectKey();
599         removeOldEntryIfAny(key);
600         writeElement(element, key);
601     }
602 
603 
604     private void writeElement(Element element, Serializable key) throws IOException {
605         try {
606             int bufferLength;
607 
608             MemoryEfficientByteArrayOutputStream buffer = null;
609             try {
610                 buffer = serializeEntry(element);
611             } catch (OutOfMemoryError e) {
612                 LOG.error("OutOfMemoryError on serialize: " + key);
613                 return;     // nothing was serialized; avoid dereferencing a null buffer below
614             }
615             bufferLength = buffer.size();
616             DiskElement diskElement = checkForFreeBlock(bufferLength);
617 
618             // Write the record
619             randomAccessFile.seek(diskElement.position);
620             randomAccessFile.write(buffer.toByteArray(), 0, bufferLength);
621             buffer = null;
622 
623             // Add to index, update stats
624             diskElement.payloadSize = bufferLength;
625 
626 
627             if (cache.isEternal()) {
628                 // Never expires
629                 diskElement.expiryTime = Long.MAX_VALUE;
630             } else {
631                 // Calculate expiry time
632                 long timeToLive = element.getCreationTime() + cache.getTimeToLiveSeconds() * MS_PER_SECOND;
633                 long timeToIdle = element.getLastAccessTime() + cache.getTimeToIdleSeconds() * MS_PER_SECOND;
634                 diskElement.expiryTime = Math.max(timeToLive, timeToIdle);
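                    // Illustrative figures only: an element created at t=0 with a timeToLive of 120s,
                    // last accessed at t=60s with a timeToIdle of 300s, gets
                    // expiryTime = max(0 + 120000, 60000 + 300000) = 360000 ms.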
635             }
636 
637 
638             totalSize += bufferLength;
639             synchronized (diskElements) {
640                 diskElements.put(key, diskElement);
641             }
642 
643         } catch (Exception e) {
644             // Catch any exception that occurs during serialization
645             LOG.error(name + "Cache: Failed to write element to disk '" + key
646                     + "'. Initial cause was " + e.getMessage(), e);
647         }
648 
649     }
650 
651     /**
652      * This class is designed to minimise the number of System.arraycopy() calls required: it is sized
653      * from an estimate of the payload and exposes its underlying buffer via {@link #getBytes()}.
654      */
655     class MemoryEfficientByteArrayOutputStream extends ByteArrayOutputStream {
656 
657 
658         /**
659          * Creates a new byte array output stream, with a buffer capacity of
660          * the specified size, in bytes.
661          *
662          * @param size the initial size.
663          */
664         public MemoryEfficientByteArrayOutputStream(int size) {
665             super(size);
666         }
667 
668         /**
669          * Gets the bytes. Not all may be valid. Use only up to size().
670          *
671          * @return the underlying byte[]
672          */
673         public synchronized byte[] getBytes() {
674             return buf;
675         }
676     }
677 
678     private MemoryEfficientByteArrayOutputStream serializeEntry(Element element) throws IOException {
679         MemoryEfficientByteArrayOutputStream outstr = new MemoryEfficientByteArrayOutputStream(estimatedPayloadSize());
680         ObjectOutputStream objstr = new ObjectOutputStream(outstr);
681         objstr.writeObject(element);
682         objstr.close();
683         return outstr;
684     }
685 
686 
687     private int estimatedPayloadSize() {
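            // Estimate the next payload as the mean size of the elements already on disk, falling back
            // to ESTIMATED_MINIMUM_PAYLOAD_SIZE when the store is empty or the estimate is unusable.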
688         int size = 0;
689         try {
690             size = (int) (totalSize / diskElements.size());
691         } catch (Exception e) {
692             //
693         }
694         if (size <= 0) {
695             size = ESTIMATED_MINIMUM_PAYLOAD_SIZE;
696         }
697         return size;
698     }
699 
700     /**
701      * Remove the old entry, if any
702      *
703      * @param key
704      */
705     private void removeOldEntryIfAny(Serializable key) {
706 
707         final DiskElement oldBlock;
708         synchronized (diskElements) {
709             oldBlock = (DiskElement) diskElements.remove(key);
710         }
711         if (oldBlock != null) {
712             freeBlock(oldBlock);
713         }
714     }
715 
716     private DiskElement checkForFreeBlock(int bufferLength) throws IOException {
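            // Reuse a previously freed block that is large enough; otherwise allocate a new block
            // at the current end of the data file.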
717         DiskElement diskElement = findFreeBlock(bufferLength);
718         if (diskElement == null) {
719             diskElement = new DiskElement();
720             diskElement.position = randomAccessFile.length();
721             diskElement.blockSize = bufferLength;
722         }
723         return diskElement;
724     }
725 
726 
727     /**
728      * Writes the Index to disk on shutdown
729      * <p/>
730      * The index consists of the elements Map and the freeSpace List
731      * <p/>
732      * Note that the cache is locked for the entire time that the index is being written
733      */
734     private synchronized void writeIndex() throws IOException {
735 
736         ObjectOutputStream objectOutputStream = null;
737         try {
738             FileOutputStream fout = new FileOutputStream(indexFile);
739             objectOutputStream = new ObjectOutputStream(fout);
740             objectOutputStream.writeObject(diskElements);
741             objectOutputStream.writeObject(freeSpace);
742         } finally {
743             if (objectOutputStream != null) {
744                 objectOutputStream.close();
745             }
746         }
747     }
748 
749     /**
750      * Reads the index from disk on startup.
751      * <p/>
752      * If the index file does not exist, it creates a new one.
753      * <p/>
754      * Note that the cache is locked for the entire time that the index is being read
755      *
756      * @return True if the index was read successfully, false otherwise
757      */
758     private synchronized boolean readIndex() throws IOException {
759         ObjectInputStream objectInputStream = null;
760         FileInputStream fin = null;
761         boolean success = false;
762         if (indexFile.exists()) {
763             try {
764                 fin = new FileInputStream(indexFile);
765                 objectInputStream = new ObjectInputStream(fin);
766                 diskElements = (Map) objectInputStream.readObject();
767                 freeSpace = (List) objectInputStream.readObject();
768                 success = true;
769             } catch (StreamCorruptedException e) {
770                 LOG.error("Corrupt index file. Creating new index.");
771             } catch (IOException e) {
772                 //normal when creating the cache for the first time
773                 LOG.debug("IOException reading index. Creating new index. ");
774             } catch (ClassNotFoundException e) {
775                 LOG.error("Class loading problem reading index. Creating new index. Initial cause was " + e.getMessage(), e);
776             } finally {
777                 try {
778                     if (objectInputStream != null) {
779                         objectInputStream.close();
780                     } else if (fin != null) {
781                         fin.close();
782                     }
783                 } catch (IOException e) {
784                     LOG.error("Problem closing the index file.");
785                 }
786 
787                 //Always zero out file. That way if there is a dirty shutdown, the file will still be empty
788                 //the next time we start up and readIndex will automatically fail.
789                 //If there was a problem reading the index this time we also want to zero it out.
790                 createNewIndexFile();
791             }
792         } else {
793             createNewIndexFile();
794         }
795 
796         //Return the success flag
797         return success;
798 
799     }
800 
801     private void createNewIndexFile() throws IOException {
802         if (indexFile.exists()) {
803             indexFile.delete();
804             LOG.debug("Index file " + indexFile + " deleted.");
805         }
806         if (indexFile.createNewFile()) {
807             LOG.debug("Index file " + indexFile + " created successfully");
808         } else {
809             throw new IOException("Index file " + indexFile + " could not be created.");
810         }
811     }
812 
813     /**
814      * The main method for the expiry thread.
815      * <p/>
816      * Will run while the cache is active. After the cache shuts down,
817      * it may take up to expiryThreadInterval for the thread to wake up and complete.
818      */
819     private void expiryThreadMain() {
820         long expiryThreadIntervalMillis = expiryThreadInterval * MS_PER_SECOND;
821         try {
822             while (active) {
823                 Thread.sleep(expiryThreadIntervalMillis);
824 
825                 //Expire the elements
826                 expireElements();
827             }
828         } catch (InterruptedException e) {
829             // Bail on interruption
830             if (LOG.isDebugEnabled()) {
831                 LOG.debug(name + "Cache: Expiry thread interrupted on Disk Store.");
832             }
833         }
834     }
835 
836     /**
837      * Removes expired elements.
838      * Note that the cache is locked for the entire time that elements are being expired.
839      *
840      * @noinspection SynchronizeOnNonFinalField
841      */
842     private void expireElements() {
843         final long now = System.currentTimeMillis();
844 
845         // Clean up the spool
846         synchronized (spool) {
847             for (Iterator iterator = spool.values().iterator(); iterator.hasNext();) {
848                 final Element element = (Element) iterator.next();
849                 if (cache.isExpired(element)) {
850                     // An expired element
851                     if (LOG.isDebugEnabled()) {
852                         LOG.debug(name + "Cache: Removing expired spool element " + element.getObjectKey());
853                     }
854                     iterator.remove();
855                     notifyExpiryListeners(element);
856                 }
857             }
858         }
859 
860         synchronized (diskElements) {
861             // Clean up disk elements
862             for (Iterator iterator = diskElements.entrySet().iterator(); iterator.hasNext();) {
863                 final Map.Entry entry = (Map.Entry) iterator.next();
864                 final DiskElement diskElement = (DiskElement) entry.getValue();
865                 if (now >= diskElement.expiryTime) {
866                     // An expired element
867                     if (LOG.isDebugEnabled()) {
868                         LOG.debug(name + "Cache: Removing expired disk element " + entry.getKey() + " from Disk Store");
869                     }
870                     iterator.remove();
871                     Element element = null;
872                     try {
873                         element = loadElementFromDiskElement(diskElement);
874                     } catch (Exception exception) {
875                         LOG.error(name + "Cache: Could not remove disk store entry for " + entry.getKey()
876                                 + ". Error was " + exception.getMessage(), exception);
877                     }
878                     freeBlock(diskElement);
879                     notifyExpiryListeners(element);
880                 }
881             }
882         }
883     }
884 
885     /**
886      * It is enough that an element is expiring here. Notify even though there might be another
887      * element with the same key elsewhere in the stores.
888      *
889      * @param element
890      */
891     private void notifyExpiryListeners(Element element) {
892         cache.getCacheEventNotificationService().notifyElementExpiry(element, false);
893     }
894 
895     /**
896      * Finds, removes and returns the first free block of at least the given length, or null if none is available.
897      */
898     private DiskElement findFreeBlock(final int length) {
899         for (int i = 0; i < freeSpace.size(); i++) {
900             final DiskElement element = (DiskElement) freeSpace.get(i);
901             if (element.blockSize >= length) {
902                 freeSpace.remove(i);
903                 return element;
904             }
905         }
906         return null;
907     }
908 
909     /**
910      * Returns a {@link String} representation of the {@link DiskStore}
911      */
912     public final String toString() {
913         StringBuffer sb = new StringBuffer();
914         sb.append("[ dataFile = ").append(dataFile.getAbsolutePath())
915                 .append(", active=").append(active)
916                 .append(", totalSize=").append(totalSize)
917                 .append(", status=").append(status)
918                 .append(", expiryThreadInterval = ").append(expiryThreadInterval)
919                 .append(" ]");
920         return sb.toString();
921     }
922 
923 
924     /**
925      * A reference to an on-disk element.
926      *
927      * @noinspection SerializableHasSerializationMethods
928      */
929     private static final class DiskElement implements Serializable {
930 
931         private static final long serialVersionUID = -717310932566592289L;
932 
933         /**
934          * the file pointer
935          */
936         private long position;
937 
938         /**
939          * The size used for data.
940          */
941         private int payloadSize;
942 
943         /**
944          * The size of the block allocated for this element on disk. May exceed payloadSize when a free block is reused.
945          */
946         private int blockSize;
947 
948         /**
949          * The expiry time in milliseconds
950          */
951         private long expiryTime;
952 
953     }
954 
955     /**
956      * A background daemon thread that writes objects to the file.
957      */
958     private final class SpoolThread extends Thread {
959         public SpoolThread() {
960             super("Store " + name + " Spool Thread");
961             setDaemon(true);
962             setPriority(2);
963         }
964 
965         /**
966          * Main thread method.
967          */
968         public final void run() {
969             spoolThreadMain();
970         }
971     }
972 
973     /**
974      * A background daemon thread that removes expired objects.
975      */
976     private final class ExpiryThread extends Thread {
977         public ExpiryThread() {
978             super("Store " + name + " Expiry Thread");
979             setDaemon(true);
980             setPriority(1);
981         }
982 
983         /**
984          * Main thread method.
985          */
986         public final void run() {
987             expiryThreadMain();
988         }
989     }
990 
991     /**
992      * @return the total size of the data file and the index file, in bytes.
993      */
994     public final long getTotalFileSize() {
995         return getDataFileSize() + getIndexFileSize();
996     }
997 
998     /**
999      * @return the size of the data file in bytes.
1000      */
1001     public final long getDataFileSize() {
1002         return dataFile.length();
1003     }
1004 
1005     /**
1006      * The design of the layout on the data file means that there will be small gaps created when DiskElements
1007      * are reused.
1008      *
1009      * @return the sparseness, measured as the fraction (0 to 1) of space in the data file not used for holding data
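          *         (for example, a 1000 byte data file holding 800 bytes of live element data has a sparseness of 0.2)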
1010      */
1011     public final float calculateDataFileSparseness() {
1012         return 1 - ((float) getUsedDataSize() / (float) getDataFileSize());
1013     }
1014 
1015     /**
1016      * When elements are deleted, spaces are left in the file. These spaces are tracked and are reused
1017      * when new elements need to be written.
1018      * <p/>
1019      * This method indicates the actual size used for data, excluding holes. It can be compared with
1020      * {@link #getDataFileSize()} as a measure of fragmentation.
1021      */
1022     public final long getUsedDataSize() {
1023         return totalSize;
1024     }
1025 
1026     /**
1027      * @return the size of the index file, in bytes.
1028      */
1029     public final long getIndexFileSize() {
1030         if (indexFile == null) {
1031             return 0;
1032         } else {
1033             return indexFile.length();
1034         }
1035     }
1036 
1037     /**
1038      * @return the file name of the data file where the disk store stores data, without any path information.
1039      */
1040     public final String getDataFileName() {
1041         return name + ".data";
1042     }
1043 
1044     /**
1045      * @return the disk path, which will be dependent on the operating system
1046      */
1047     public final String getDataFilePath() {
1048         return diskPath;
1049     }
1050 
1051     /**
1052      * @return the file name of the index file, which maintains a record of elements and their addresses
1053      *         on the data file, without any path information.
1054      */
1055     public final String getIndexFileName() {
1056         return name + ".index";
1057     }
1058 
1059 
1060     /**
1061      * The expiry thread is started provided the cache is not eternal
1062      * <p/>
1063      * If started it will continue to run until the {@link #dispose()} method is called,
1064      * at which time it should be interrupted and then die.
1065      *
1066      * @return true if an expiryThread was created and is still alive.
1067      */
1068     public final boolean isExpiryThreadAlive() {
1069         if (expiryThread == null) {
1070             return false;
1071         } else {
1072             return expiryThread.isAlive();
1073         }
1074     }
1075 
1076 }