Commits

Anonymous committed 3366e55

Submitter: Andres March

refactoring. DOES NOT COMPILE

  • Parent commits 82a8f6a
  • Branches amarch_sandbox

Files changed (16)

File src/core/java/com/opensymphony/oscache/base/Cache.java

      *
      * @return The object from cache
      *
-     * @throws NeedsRefreshException Thrown when the object either
-     * doesn't exist, or exists but is stale. When this exception occurs,
-     * the CacheEntry corresponding to the supplied key will be locked
-     * and other threads requesting this entry will potentially be blocked
-     * until the caller repopulates the cache. If the caller choses not
-     * to repopulate the cache, they <em>must</em> instead call
-     * {@link #cancelUpdate(String)}.
      */
-    public abstract Object getFromCache(String key) throws NeedsRefreshException;
+    public abstract Object get(Object key) ;
 
     /**
      * Retrieve an object from the cache specifying its key.
      *
      * @return The object from cache
      *
-     * @throws NeedsRefreshException Thrown when the object either
-     * doesn't exist, or exists but is stale. When this exception occurs,
-     * the CacheEntry corresponding to the supplied key will be locked
-     * and other threads requesting this entry will potentially be blocked
-     * until the caller repopulates the cache. If the caller choses not
-     * to repopulate the cache, they <em>must</em> instead call
-     * {@link #cancelUpdate(String)}.
      */
-    public abstract Object getFromCache(String key, int refreshPeriod) throws NeedsRefreshException;
+    public abstract Object get(Object key, int refreshPeriod);
 
     /**
      * Retrieve an object from the cache specifying its key.
      *
      * @return The object from cache
      *
-     * @throws NeedsRefreshException Thrown when the object either
-     * doesn't exist, or exists but is stale. When this exception occurs,
-     * the CacheEntry corresponding to the supplied key will be locked
-     * and other threads requesting this entry will potentially be blocked
-     * until the caller repopulates the cache. If the caller choses not
-     * to repopulate the cache, they <em>must</em> instead call
-     * {@link #cancelUpdate(String)}.
      */
-    public abstract Object getFromCache(String key, int refreshPeriod, String cronExpiry) throws NeedsRefreshException;
+    public abstract Object get(Object key, int refreshPeriod, String cronExpiry);
 
     /**
      * Set the listener to use for data persistence. Only one
      */
     public abstract void flushEntry(String key, String origin);
 
-    /**
-     * Flushes all objects that belong to the supplied group. On completion
-     * this method fires a <tt>CacheEntryEventType.GROUP_FLUSHED</tt> event.
-     *
-     * @param group The group to flush
-     */
-    public abstract void flushGroup(String group);
-
-    /**
-     * Flushes all unexpired objects that belong to the supplied group. On
-     * completion this method fires a <tt>CacheEntryEventType.GROUP_FLUSHED</tt>
-     * event.
-     *
-     * @param group The group to flush
-     * @param origin The origin of this flush event (optional)
-     */
-    public abstract void flushGroup(String group, String origin);
-
-    /**
-     * Flush all entries with keys that match a given pattern
-     *
-     * @param  pattern The key must contain this given value
-     * @deprecated For performance and flexibility reasons it is preferable to
-     * store cache entries in groups and use the {@link #flushGroup(String)} method
-     * instead of relying on pattern flushing.
-     */
-    public abstract void flushPattern(String pattern);
-
-    /**
-     * Flush all entries with keys that match a given pattern
-     *
-     * @param  pattern The key must contain this given value
-     * @param origin The origin of this flush request
-     * @deprecated For performance and flexibility reasons it is preferable to
-     * store cache entries in groups and use the {@link #flushGroup(String, String)}
-     * method instead of relying on pattern flushing.
-     */
-    public abstract void flushPattern(String pattern, String origin);
-
+   
     /**
      * Put an object in the cache specifying the key to use.
      *
      * @param key       Key of the object in the cache.
      * @param content   The object to cache.
      */
-    public abstract void putInCache(String key, Object content);
+    public abstract void put(Object key, Object content);
 
     /**
      * Put an object in the cache specifying the key and refresh policy to use.
      * @param content   The object to cache.
      * @param policy   Object that implements refresh policy logic
      */
-    public abstract void putInCache(String key, Object content, EntryRefreshPolicy policy);
-
-    /**
-     * Put in object into the cache, specifying both the key to use and the
-     * cache groups the object belongs to.
-     *
-     * @param key       Key of the object in the cache
-     * @param content   The object to cache
-     * @param groups    The cache groups to add the object to
-     */
-    public abstract void putInCache(String key, Object content, String[] groups);
+    public abstract void put(Object key, Object content, EntryRefreshPolicy policy);
 
     /**
      * Put an object into the cache specifying both the key to use and the
      * cache groups the object belongs to.
      *
      * @param key       Key of the object in the cache
-     * @param groups    The cache groups to add the object to
      * @param content   The object to cache
      * @param policy    Object that implements the refresh policy logic
      */
-    public abstract void putInCache(String key, Object content, String[] groups, EntryRefreshPolicy policy, String origin);
+    public abstract void put(Object key, Object content, EntryRefreshPolicy policy, String origin);
 
     /**
      * Unregister a listener for Cache events.

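For context, a caller-side sketch of what this signature change implies (hypothetical caller code, not part of this commit; renderFrontPage() is an assumed helper). The old API signalled a miss or stale entry with the checked NeedsRefreshException (deleted further below), obliging the caller to repopulate the entry or call cancelUpdate(); the new get() declares no exception, so a miss presumably just returns the entry's current content, or null for a brand-new entry.

    // Before this commit: miss/stale handling via NeedsRefreshException
    String page;
    try {
        page = (String) cache.getFromCache("front-page", 300);
    } catch (NeedsRefreshException nre) {
        try {
            page = renderFrontPage();                 // rebuild the value (assumed helper)
            cache.putInCache("front-page", page);
        } catch (Exception e) {
            cache.cancelUpdate("front-page");         // must release the entry lock if we give up
            page = (String) nre.getCacheContent();    // fall back to the stale content
        }
    }

    // After this commit (assumed semantics of the new, exception-free API)
    String page2 = (String) cache.get("front-page", 300);
    if (page2 == null) {
        page2 = renderFrontPage();
        cache.put("front-page", page2);
    }
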
File src/core/java/com/opensymphony/oscache/base/CacheEntry.java

  */
     private Object content = null;
 
-    /**
- * The set of cache groups that this cache entry belongs to, if any.
- */
-    private Set groups = null;
 
     /**
  *  The unique cache key for this entry
  */
-    private String key;
+    private Object key;
 
     /**
  * <code>true</code> if this entry was flushed
  * @param policy   Object that implements refresh policy logic. This parameter
  * is optional.
  */
-    public CacheEntry(String key, EntryRefreshPolicy policy) {
+    public CacheEntry(Object key, EntryRefreshPolicy policy) {
         this(key, policy, null);
     }
 
  * @param groups  The groups that this <code>CacheEntry</code> belongs to. This
  * parameter is optional.
  */
-    public CacheEntry(String key, EntryRefreshPolicy policy, String[] groups) {
+    public CacheEntry(Object key, EntryRefreshPolicy policy, String[] groups) {
         this.key = key;
 
-        if (groups != null) {
-            this.groups = new HashSet(groups.length);
-
-            for (int i = 0; i < groups.length; i++) {
-                this.groups.add(groups[i]);
-            }
-        }
-
         this.policy = policy;
         this.created = System.currentTimeMillis();
         this.updateState = new EntryUpdateState();
         return created;
     }
 
-    /**
- * Sets the cache groups for this entry.
- *
- * @param groups A string array containing all the group names
- */
-    public synchronized void setGroups(String[] groups) {
-        if (groups != null) {
-            this.groups = new HashSet(groups.length);
-
-            for (int i = 0; i < groups.length; i++) {
-                this.groups.add(groups[i]);
-            }
-        } else {
-            this.groups = null;
-        }
-
-        lastUpdate = System.currentTimeMillis();
-    }
-
-    /**
- * Sets the cache groups for this entry
- *
- * @param groups A collection containing all the group names
- */
-    public void setGroups(Collection groups) {
-        if (groups != null) {
-            this.groups = new HashSet(groups);
-        } else {
-            this.groups = null;
-        }
-
-        lastUpdate = System.currentTimeMillis();
-    }
-
-    /**
- * Gets the cache groups that this cache entry belongs to.
- * These returned groups should be treated as immuatable.
- *
- * @return A set containing the names of all the groups that
- * this cache entry belongs to.
- */
-    public Set getGroups() {
-        return groups;
-    }
 
     /**
  * Get the key of this CacheEntry
  *
  * @return The key of this CacheEntry
  */
-    public String getKey() {
+    public Object getKey() {
         return key;
     }
 
     /**
  * Get the size of the cache entry in bytes (roughly).<p>
  *
- * Currently this method only handles <code>String<code>s and
+ * Currently this method only handles 
  * {@link ResponseContent} objects.
  *
  * @return The approximate size of the entry in bytes, or -1 if the
  * size could not be estimated.
  */
     public int getSize() {
-        // a char is two bytes
-        int size = (key.length() * 2) + 4;
+        int size = 0;
 
         if (content.getClass() == String.class) {
             size += ((content.toString().length() * 2) + 4);

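One consequence of widening the key from String to Object (here and in CacheImpl below): any custom key type must provide value-based equals() and hashCode(), since keys end up in CacheImpl's ConcurrentHashMap. A minimal hypothetical key class, purely illustrative and not part of this commit:

    // Hypothetical composite key honouring the equals/hashCode contract
    // required of Object keys stored in the cache map.
    final class ProductKey {
        private final long id;
        private final String locale;

        ProductKey(long id, String locale) {
            this.id = id;
            this.locale = locale;
        }

        public boolean equals(Object o) {
            if (!(o instanceof ProductKey)) {
                return false;
            }
            ProductKey other = (ProductKey) o;
            return (id == other.id) && locale.equals(other.locale);
        }

        public int hashCode() {
            return (int) (id * 31) + locale.hashCode();
        }
    }
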
File src/core/java/com/opensymphony/oscache/base/CacheImpl.java

  */
 package com.opensymphony.oscache.base;
 
+import java.io.Serializable;
+import java.text.ParseException;
+import java.util.Date;
+import java.util.Map;
+
+import javax.swing.event.EventListenerList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import com.opensymphony.oscache.base.events.CacheEntryEvent;
 import com.opensymphony.oscache.base.events.CacheEntryEventListener;
 import com.opensymphony.oscache.base.events.CacheEntryEventType;
 import com.opensymphony.oscache.util.FastCronParser;
 
 import edu.emory.mathcs.backport.java.util.concurrent.ConcurrentHashMap;
-import edu.emory.mathcs.backport.java.util.concurrent.CopyOnWriteArraySet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import java.io.Serializable;
-
-import java.text.ParseException;
-
-import java.util.Date;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import javax.swing.event.EventListenerList;
 
 /**
  * Provides an interface to the cache itself. Creating an instance of this class
     /**
      * The actual cache map. This is where the cached objects are held.
      */
-    private Map cacheMap = null;
+    private Map cacheMap = new ConcurrentHashMap();
 
-    /**
-    * The groups map. This is where the cached objects group memberships are stored.
-    */
-    private Map groups = null;
     private PersistenceListener persistenceListener;
 
     /**
     private boolean useMemoryCaching;
     private int capacity;
 
+	/**
+	 * 
+	 */
+	public CacheImpl() {
+
+		// TODO Auto-generated constructor stub
+	}
     /**
      * Create a new Cache
      *
      * @param overflowPersistence Specify if the persistent cache is used in overflow only mode
      */
     public CacheImpl(boolean useMemoryCaching, boolean unlimitedDiskCache, boolean overflowPersistence) {
-        this(useMemoryCaching, unlimitedDiskCache, overflowPersistence, false, null, 0);
+        this(useMemoryCaching, unlimitedDiskCache, overflowPersistence, false, 0);
     }
 
     /**
      * (<code>blocking == false</code>). the default is <code>false</code>,
      * which provides better performance but at the expense of slightly stale
      * data being served.
-     * @param algorithmClass The class implementing the desired algorithm
      * @param capacity The capacity
      */
-    public CacheImpl(boolean useMemoryCaching, boolean unlimitedDiskCache, boolean overflowPersistence, boolean blocking, String algorithmClass, int capacity) {
+    public CacheImpl(boolean useMemoryCaching, boolean unlimitedDiskCache, boolean overflowPersistence, boolean blocking, int capacity) {
         try {
             cacheMap = new ConcurrentHashMap();
         } catch (Exception e) {
         this.blocking = blocking;
     }
 
-    /* (non-Javadoc)
+    /**
+	 * @param capacity2
+	 */
+	public CacheImpl(int capacity) {
+		this.capacity = capacity;
+	}
+	
+	/* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#setCapacity(int)
          */
     public void setCapacity(int capacity) {
     /* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#getFromCache(java.lang.String)
          */
-    public Object getFromCache(String key) throws NeedsRefreshException {
-        return getFromCache(key, CacheEntry.INDEFINITE_EXPIRY, null);
+    public Object get(Object key){
+        return get(key, CacheEntry.INDEFINITE_EXPIRY, null);
     }
 
     /* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#getFromCache(java.lang.String, int)
          */
-    public Object getFromCache(String key, int refreshPeriod) throws NeedsRefreshException {
-        return getFromCache(key, refreshPeriod, null);
+    public Object get(Object key, int refreshPeriod){
+        return get(key, refreshPeriod, null);
     }
 
     /* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#getFromCache(java.lang.String, int, java.lang.String)
          */
-    public Object getFromCache(String key, int refreshPeriod, String cronExpiry) throws NeedsRefreshException {
+    public Object get(Object key, int refreshPeriod, String cronExpiry){
         CacheEntry cacheEntry = getCacheEntry(key, null, null);
 
         Object content = cacheEntry.getContent();
 
         // If we didn't end up getting a hit then we need to throw a NRE
         if (accessEventType != CacheMapAccessEventType.HIT) {
-            throw new NeedsRefreshException(content);
+//            throw new NeedsRefreshException(content);
         }
 
         return content;
     }
 
     /* (non-Javadoc)
-         * @see com.opensymphony.oscache.base.CacheAPI#flushGroup(java.lang.String)
-         */
-    public void flushGroup(String group) {
-        flushGroup(group, null);
-    }
-
-    /* (non-Javadoc)
-         * @see com.opensymphony.oscache.base.CacheAPI#flushGroup(java.lang.String, java.lang.String)
-         */
-    public void flushGroup(String group, String origin) {
-        // Flush all objects in the group
-        Set groupEntries = (Set) groups.get(group);
-
-        if (groupEntries != null) {
-            Iterator itr = groupEntries.iterator();
-            String key;
-            CacheEntry entry;
-
-            while (itr.hasNext()) {
-                key = (String) itr.next();
-                entry = (CacheEntry) cacheMap.get(key);
-
-                if ((entry != null) && !entry.needsRefresh(CacheEntry.INDEFINITE_EXPIRY)) {
-                    flushEntry(entry, NESTED_EVENT);
-                }
-            }
-        }
-
-        if (listenerList.getListenerCount() > 0) {
-            dispatchCacheGroupEvent(CacheEntryEventType.GROUP_FLUSHED, group, origin);
-        }
-    }
-
-    /* (non-Javadoc)
-         * @see com.opensymphony.oscache.base.CacheAPI#flushPattern(java.lang.String)
-         */
-    public void flushPattern(String pattern) {
-        flushPattern(pattern, null);
-    }
-
-    /* (non-Javadoc)
-         * @see com.opensymphony.oscache.base.CacheAPI#flushPattern(java.lang.String, java.lang.String)
-         */
-    public void flushPattern(String pattern, String origin) {
-        // Check the pattern
-        if ((pattern != null) && (pattern.length() > 0)) {
-            String key = null;
-            CacheEntry entry = null;
-            Iterator itr = cacheMap.keySet().iterator();
-
-            while (itr.hasNext()) {
-                key = (String) itr.next();
-
-                if (key.indexOf(pattern) >= 0) {
-                    entry = (CacheEntry) cacheMap.get(key);
-
-                    if (entry != null) {
-                        flushEntry(entry, origin);
-                    }
-                }
-            }
-
-            if (listenerList.getListenerCount() > 0) {
-                dispatchCachePatternEvent(CacheEntryEventType.PATTERN_FLUSHED, pattern, origin);
-            }
-        } else {
-            // Empty pattern, nothing to do
-        }
-    }
-
-    /* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#putInCache(java.lang.String, java.lang.Object)
          */
-    public void putInCache(String key, Object content) {
-        putInCache(key, content, null, null, null);
+    public void put(Object key, Object content) {
+        put(key, content, null);
     }
 
     /* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#putInCache(java.lang.String, java.lang.Object, com.opensymphony.oscache.base.EntryRefreshPolicy)
          */
-    public void putInCache(String key, Object content, EntryRefreshPolicy policy) {
-        putInCache(key, content, null, policy, null);
-    }
-
-    /* (non-Javadoc)
-         * @see com.opensymphony.oscache.base.CacheAPI#putInCache(java.lang.String, java.lang.Object, java.lang.String[])
-         */
-    public void putInCache(String key, Object content, String[] groups) {
-        putInCache(key, content, groups, null, null);
+    public void put(Object key, Object content, EntryRefreshPolicy policy) {
+        put(key, content, null, null);
     }
 
     /* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#putInCache(java.lang.String, java.lang.Object, java.lang.String[], com.opensymphony.oscache.base.EntryRefreshPolicy, java.lang.String)
          */
-    public void putInCache(String key, Object content, String[] groups, EntryRefreshPolicy policy, String origin) {
+    public void put(Object key, Object content, EntryRefreshPolicy policy, String origin) {
         CacheEntry cacheEntry = getCacheEntry(key, policy, origin);
         boolean isNewEntry = cacheEntry.isNew();
 
         synchronized (cacheEntry) {
             cacheEntry.setContent(content);
-            cacheEntry.setGroups(groups);
             cacheMap.put(key, cacheEntry);
-
-            if (groups != null) {
-                addGroupMappings(key, groups);
-            }
+           
         }
 
         // Signal to any threads waiting on this update that it's now ready for them
         }
     }
 
-    /**
-    * Add this cache key to the groups specified groups.
-    * We have to treat the
-    * memory and disk group mappings seperately so they remain valid for their
-    * corresponding memory/disk caches. (eg if mem is limited to 100 entries
-    * and disk is unlimited, the group mappings will be different).
-    *
-    * @param key The cache key that we are ading to the groups.
-    * @param newGroups the set of groups we want to add this cache entry to.
-    * @param persist A flag to indicate whether the keys should be added to
-    * the persistent cache layer.
-    */
-    private void addGroupMappings(String key, String[] newGroups) {
-        // Add this CacheEntry to the groups that it is now a member of
-        for (int i = 0; i < newGroups.length; i++) {
-            String groupName = newGroups[i];
-
-            if (groups == null) {
-                groups = new ConcurrentHashMap();
-            }
-
-            Set group = (Set) groups.get(groupName);
-
-            if (group == null) {
-                group = new CopyOnWriteArraySet();
-                groups.put(groupName, group);
-            }
-
-            group.add(key);
-        }
-    }
 
     /* (non-Javadoc)
          * @see com.opensymphony.oscache.base.CacheAPI#removeCacheEventListener(com.opensymphony.oscache.base.events.CacheEventListener, java.lang.Class)
      * @param origin The origin of request (optional)
      * @return CacheEntry for the specified key.
      */
-    protected CacheEntry getCacheEntry(String key, EntryRefreshPolicy policy, String origin) {
+    protected CacheEntry getCacheEntry(Object key, EntryRefreshPolicy policy, String origin) {
         CacheEntry cacheEntry = null;
 
         // Verify that the key is valid
-        if ((key == null) || (key.length() == 0)) {
+        if (key == null) {
             throw new IllegalArgumentException("getCacheEntry called with an empty or null key");
         }
 
      * @param origin The origin of this flush event (optional)
      */
     private void flushEntry(CacheEntry entry, String origin) {
-        String key = entry.getKey();
+    	Object key = entry.getKey();
 
         // Flush the object itself
         entry.flush();

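For reference, the group support deleted above (flushGroup, flushPattern, the groups map and addGroupMappings) had a caller-facing API along these lines; this commit removes it without a stated replacement. product and otherProduct below stand in for whatever objects a caller would cache.

    // Grouped entries as they worked before this commit (removed here):
    cache.putInCache("product:42", product, new String[] {"products", "homepage"});
    cache.putInCache("product:43", otherProduct, new String[] {"products"});

    // A single call flushed every entry tagged with the group and fired a
    // CacheEntryEventType.GROUP_FLUSHED event to registered listeners.
    cache.flushGroup("products");
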
File src/core/java/com/opensymphony/oscache/base/NeedsRefreshException.java

-/*
- * Copyright (c) 2002-2003 by OpenSymphony
- * All rights reserved.
- */
-package com.opensymphony.oscache.base;
-
-
-/**
- * This exception is thrown when retrieving an item from cache and it is
- * expired.
- * Note that for fault tolerance purposes, it is possible to retrieve the
- * current cached object from the exception.
- *
- * <p>January, 2004 - The OSCache developers are aware of the fact that throwing
- * an exception for a perfect valid situation (cache miss) is design smell. This will
- * be removed in the near future, and other means of refreshing the cache will be
- * provided.</p>
- *
- * @author        <a href="mailto:fbeauregard@pyxis-tech.com">Francois Beauregard</a>
- * @version        $Revision$
- */
-public final class NeedsRefreshException extends Exception {
-    /**
-     * Current object in the cache
-     */
-    private Object cacheContent = null;
-
-    /**
-     * Create a NeedsRefreshException
-     */
-    public NeedsRefreshException(Object cacheContent) {
-        super();
-        this.cacheContent = cacheContent;
-    }
-
-    /**
-     * Retrieve current object in the cache
-     */
-    public Object getCacheContent() {
-        return cacheContent;
-    }
-}

File src/core/java/com/opensymphony/oscache/base/algorithm/AbstractConcurrentReadCache.java

-/*
- * Copyright (c) 2002-2003 by OpenSymphony
- * All rights reserved.
- */
-/*
-        File: AbstractConcurrentReadCache
-
-        Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java
-        which carries the following copyright:
-
-                 * Copyright 1997 by Sun Microsystems, Inc.,
-                 * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
-                 * All rights reserved.
-                 *
-                 * This software is the confidential and proprietary information
-                 * of Sun Microsystems, Inc. ("Confidential Information").  You
-                 * shall not disclose such Confidential Information and shall use
-                 * it only in accordance with the terms of the license agreement
-                 * you entered into with Sun.
-
-        This class is a modified version of ConcurrentReaderHashMap, which was written
-        by Doug Lea (http://gee.cs.oswego.edu/dl/). The modifications where done
-        by Pyxis Technologies. This is a base class for the OSCache module of the
-        openSymphony project (www.opensymphony.com).
-
-        History:
-        Date       Who                What
-        28oct1999  dl               Created
-        14dec1999  dl               jmm snapshot
-        19apr2000  dl               use barrierLock
-        12jan2001  dl               public release
-        Oct2001    abergevin@pyxis-tech.com
-                                                                Integrated persistence and outer algorithm support
-*/
-package com.opensymphony.oscache.base.algorithm;
-
-
-/** OpenSymphony BEGIN */
-import com.opensymphony.oscache.base.CacheEntry;
-import com.opensymphony.oscache.base.persistence.CachePersistenceException;
-import com.opensymphony.oscache.base.persistence.PersistenceListener;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-import java.util.*;
-
-/**
- * A version of Hashtable that supports mostly-concurrent reading, but exclusive writing.
- * Because reads are not limited to periods
- * without writes, a concurrent reader policy is weaker than a classic
- * reader/writer policy, but is generally faster and allows more
- * concurrency. This class is a good choice especially for tables that
- * are mainly created by one thread during the start-up phase of a
- * program, and from then on, are mainly read (with perhaps occasional
- * additions or removals) in many threads.  If you also need concurrency
- * among writes, consider instead using ConcurrentHashMap.
- * <p>
- *
- * Successful retrievals using get(key) and containsKey(key) usually
- * run without locking. Unsuccessful ones (i.e., when the key is not
- * present) do involve brief synchronization (locking).  Also, the
- * size and isEmpty methods are always synchronized.
- *
- * <p> Because retrieval operations can ordinarily overlap with
- * writing operations (i.e., put, remove, and their derivatives),
- * retrievals can only be guaranteed to return the results of the most
- * recently <em>completed</em> operations holding upon their
- * onset. Retrieval operations may or may not return results
- * reflecting in-progress writing operations.  However, the retrieval
- * operations do always return consistent results -- either those
- * holding before any single modification or after it, but never a
- * nonsense result.  For aggregate operations such as putAll and
- * clear, concurrent reads may reflect insertion or removal of only
- * some entries. In those rare contexts in which you use a hash table
- * to synchronize operations across threads (for example, to prevent
- * reads until after clears), you should either encase operations
- * in synchronized blocks, or instead use java.util.Hashtable.
- *
- * <p>
- *
- * This class also supports optional guaranteed
- * exclusive reads, simply by surrounding a call within a synchronized
- * block, as in <br>
- * <code>AbstractConcurrentReadCache t; ... Object v; <br>
- * synchronized(t) { v = t.get(k); } </code> <br>
- *
- * But this is not usually necessary in practice. For
- * example, it is generally inefficient to write:
- *
- * <pre>
- *   AbstractConcurrentReadCache t; ...            // Inefficient version
- *   Object key; ...
- *   Object value; ...
- *   synchronized(t) {
- *     if (!t.containsKey(key))
- *       t.put(key, value);
- *       // other code if not previously present
- *     }
- *     else {
- *       // other code if it was previously present
- *     }
- *   }
- *</pre>
- * Instead, just take advantage of the fact that put returns
- * null if the key was not previously present:
- * <pre>
- *   AbstractConcurrentReadCache t; ...                // Use this instead
- *   Object key; ...
- *   Object value; ...
- *   Object oldValue = t.put(key, value);
- *   if (oldValue == null) {
- *     // other code if not previously present
- *   }
- *   else {
- *     // other code if it was previously present
- *   }
- *</pre>
- * <p>
- *
- * Iterators and Enumerations (i.e., those returned by
- * keySet().iterator(), entrySet().iterator(), values().iterator(),
- * keys(), and elements()) return elements reflecting the state of the
- * hash table at some point at or since the creation of the
- * iterator/enumeration.  They will return at most one instance of
- * each element (via next()/nextElement()), but might or might not
- * reflect puts and removes that have been processed since they were
- * created.  They do <em>not</em> throw ConcurrentModificationException.
- * However, these iterators are designed to be used by only one
- * thread at a time. Sharing an iterator across multiple threads may
- * lead to unpredictable results if the table is being concurrently
- * modified.  Again, you can ensure interference-free iteration by
- * enclosing the iteration in a synchronized block.  <p>
- *
- * This class may be used as a direct replacement for any use of
- * java.util.Hashtable that does not depend on readers being blocked
- * during updates. Like Hashtable but unlike java.util.HashMap,
- * this class does NOT allow <tt>null</tt> to be used as a key or
- * value.  This class is also typically faster than ConcurrentHashMap
- * when there is usually only one thread updating the table, but
- * possibly many retrieving values from it.
- * <p>
- *
- * Implementation note: A slightly faster implementation of
- * this class will be possible once planned Java Memory Model
- * revisions are in place.
- *
- * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
- **/
-public abstract class AbstractConcurrentReadCache extends AbstractMap implements Map, Cloneable, Serializable {
-    /**
-     * The default initial number of table slots for this table (32).
-     * Used when not otherwise specified in constructor.
-     **/
-    public static int DEFAULT_INITIAL_CAPACITY = 32;
-
-    /**
-     * The minimum capacity.
-     * Used if a lower value is implicitly specified
-     * by either of the constructors with arguments.
-     * MUST be a power of two.
-     */
-    private static final int MINIMUM_CAPACITY = 4;
-
-    /**
-     * The maximum capacity.
-     * Used if a higher value is implicitly specified
-     * by either of the constructors with arguments.
-     * MUST be a power of two <= 1<<30.
-     */
-    private static final int MAXIMUM_CAPACITY = 1 << 30;
-
-    /**
-     * The default load factor for this table.
-     * Used when not otherwise specified in constructor, the default is 0.75f.
-     **/
-    public static final float DEFAULT_LOAD_FACTOR = 0.75f;
-
-    //OpenSymphony BEGIN (pretty long!)
-    protected static final String NULL = "_nul!~";
-    protected static Log log = LogFactory.getLog(AbstractConcurrentReadCache.class);
-
-    /*
-      The basic strategy is an optimistic-style scheme based on
-      the guarantee that the hash table and its lists are always
-      kept in a consistent enough state to be read without locking:
-
-      * Read operations first proceed without locking, by traversing the
-         apparently correct list of the apparently correct bin. If an
-         entry is found, but not invalidated (value field null), it is
-         returned. If not found, operations must recheck (after a memory
-         barrier) to make sure they are using both the right list and
-         the right table (which can change under resizes). If
-         invalidated, reads must acquire main update lock to wait out
-         the update, and then re-traverse.
-
-      * All list additions are at the front of each bin, making it easy
-         to check changes, and also fast to traverse.  Entry next
-         pointers are never assigned. Remove() builds new nodes when
-         necessary to preserve this.
-
-      * Remove() (also clear()) invalidates removed nodes to alert read
-         operations that they must wait out the full modifications.
-
-    */
-
-    /**
-     * Lock used only for its memory effects. We use a Boolean
-     * because it is serializable, and we create a new one because
-     * we need a unique object for each cache instance.
-     **/
-    protected final Boolean barrierLock = new Boolean(true);
-
-    /**
-     * field written to only to guarantee lock ordering.
-     **/
-    protected transient Object lastWrite;
-
-    /**
-     * The hash table data.
-     */
-    protected transient Entry[] table;
-
-    /**
-     * The total number of mappings in the hash table.
-     */
-    protected transient int count;
-
-    /**
-     * Persistence listener.
-     */
-    protected PersistenceListener persistenceListener = null;
-
-    /**
-     * Use memory cache or not.
-     */
-    protected boolean memoryCaching = true;
-
-    /**
-     * Use unlimited disk caching.
-     */
-    protected boolean unlimitedDiskCache = false;
-
-    /**
-     * The load factor for the hash table.
-     *
-     * @serial
-     */
-    protected float loadFactor;
-
-    /**
-     * Default cache capacity (number of entries).
-     */
-    protected final int DEFAULT_MAX_ENTRIES = 100;
-
-    /**
-     * Max number of element in cache when considered unlimited.
-     */
-    protected final int UNLIMITED = 2147483646;
-    protected transient Collection values = null;
-
-    /**
-     * A HashMap containing the group information.
-     * Each entry uses the group name as the key, and holds a
-     * <code>Set</code> of containing keys of all
-     * the cache entries that belong to that particular group.
-     */
-    protected HashMap groups = null;
-    protected transient Set entrySet = null;
-
-    // Views
-    protected transient Set keySet = null;
-
-    /**
-     * Cache capacity (number of entries).
-     */
-    protected int maxEntries = DEFAULT_MAX_ENTRIES;
-
-    /**
-     * The table is rehashed when its size exceeds this threshold.
-     * (The value of this field is always (int)(capacity * loadFactor).)
-     *
-     * @serial
-     */
-    protected int threshold;
-
-    /**
-     * Use overflow persistence caching.
-     */
-    private boolean overflowPersistence = false;
-
-    /**
-     * Constructs a new, empty map with the specified initial capacity and the specified load factor.
-     *
-     * @param initialCapacity the initial capacity
-     *  The actual initial capacity is rounded to the nearest power of two.
-     * @param loadFactor  the load factor of the AbstractConcurrentReadCache
-     * @throws IllegalArgumentException  if the initial maximum number
-     *               of elements is less
-     *               than zero, or if the load factor is nonpositive.
-     */
-    public AbstractConcurrentReadCache(int initialCapacity, float loadFactor) {
-        if (loadFactor <= 0) {
-            throw new IllegalArgumentException("Illegal Load factor: " + loadFactor);
-        }
-
-        this.loadFactor = loadFactor;
-
-        int cap = p2capacity(initialCapacity);
-        table = new Entry[cap];
-        threshold = (int) (cap * loadFactor);
-    }
-
-    /**
-     * Constructs a new, empty map with the specified initial capacity and default load factor.
-     *
-     * @param   initialCapacity   the initial capacity of the
-     *                            AbstractConcurrentReadCache.
-     * @throws    IllegalArgumentException if the initial maximum number
-     *              of elements is less
-     *              than zero.
-     */
-    public AbstractConcurrentReadCache(int initialCapacity) {
-        this(initialCapacity, DEFAULT_LOAD_FACTOR);
-    }
-
-    /**
-     * Constructs a new, empty map with a default initial capacity and load factor.
-     */
-    public AbstractConcurrentReadCache() {
-        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
-    }
-
-    /**
-     * Constructs a new map with the same mappings as the given map.
-     * The map is created with a capacity of twice the number of mappings in
-     * the given map or 11 (whichever is greater), and a default load factor.
-     */
-    public AbstractConcurrentReadCache(Map t) {
-        this(Math.max(2 * t.size(), 11), DEFAULT_LOAD_FACTOR);
-        putAll(t);
-    }
-
-    /**
-     * Returns <tt>true</tt> if this map contains no key-value mappings.
-     *
-     * @return <tt>true</tt> if this map contains no key-value mappings.
-     */
-    public synchronized boolean isEmpty() {
-        return count == 0;
-    }
-
-    /**
-     * Returns a set of the cache keys that reside in a particular group.
-     *
-     * @param   groupName The name of the group to retrieve.
-     * @return  a set containing all of the keys of cache entries that belong
-     * to this group, or <code>null</code> if the group was not found.
-     * @exception  NullPointerException if the groupName is <code>null</code>.
-     */
-    public Set getGroup(String groupName) {
-        if (log.isDebugEnabled()) {
-            log.debug("getGroup called (group=" + groupName + ")");
-        }
-
-        Set groupEntries = null;
-
-        if (memoryCaching && (groups != null)) {
-            groupEntries = (Set) getGroupForReading(groupName);
-        }
-
-        if (groupEntries == null) {
-            // Not in the map, try the persistence layer
-            groupEntries = persistRetrieveGroup(groupName);
-        }
-
-        return groupEntries;
-    }
-
-    /**
-     * Set the cache capacity
-     */
-    public void setMaxEntries(int newLimit) {
-        if (newLimit > 0) {
-            maxEntries = newLimit;
-
-            synchronized (this) { // because remove() isn't synchronized
-
-                while (size() > maxEntries) {
-                    remove(removeItem(), false);
-                }
-            }
-        } else {
-            // Capacity must be at least 1
-            throw new IllegalArgumentException("Cache maximum number of entries must be at least 1");
-        }
-    }
-
-    /**
-     * Retrieve the cache capacity (number of entries).
-     */
-    public int getMaxEntries() {
-        return maxEntries;
-    }
-
-    /**
-     * Sets the memory caching flag.
-     */
-    public void setMemoryCaching(boolean memoryCaching) {
-        this.memoryCaching = memoryCaching;
-    }
-
-    /**
-     * Check if memory caching is used.
-     */
-    public boolean isMemoryCaching() {
-        return memoryCaching;
-    }
-
-    /**
-     * Set the persistence listener to use.
-     */
-    public void setPersistenceListener(PersistenceListener listener) {
-        this.persistenceListener = listener;
-    }
-
-    /**
-     * Get the persistence listener.
-     */
-    public PersistenceListener getPersistenceListener() {
-        return persistenceListener;
-    }
-
-    /**
-     * Sets the unlimited disk caching flag.
-     */
-    public void setUnlimitedDiskCache(boolean unlimitedDiskCache) {
-        this.unlimitedDiskCache = unlimitedDiskCache;
-    }
-
-    /**
-     * Check if we use unlimited disk cache.
-     */
-    public boolean isUnlimitedDiskCache() {
-        return unlimitedDiskCache;
-    }
-
-    /**
-     * Check if we use overflowPersistence
-     *
-     * @return Returns the overflowPersistence.
-     */
-    public boolean isOverflowPersistence() {
-        return this.overflowPersistence;
-    }
-
-    /**
-     * Sets the overflowPersistence flag
-     *
-     * @param overflowPersistence The overflowPersistence to set.
-     */
-    public void setOverflowPersistence(boolean overflowPersistence) {
-        this.overflowPersistence = overflowPersistence;
-    }
-
-    /**
-     * Return the number of slots in this table.
-     **/
-    public synchronized int capacity() {
-        return table.length;
-    }
-
-    /**
-     * Removes all mappings from this map.
-     */
-    public synchronized void clear() {
-        Entry[] tab = table;
-
-        for (int i = 0; i < tab.length; ++i) {
-            // must invalidate all to force concurrent get's to wait and then retry
-            for (Entry e = tab[i]; e != null; e = e.next) {
-                e.value = null;
-
-                /** OpenSymphony BEGIN */
-                itemRemoved(e.key);
-
-                /** OpenSymphony END */
-            }
-
-            tab[i] = null;
-        }
-
-        // Clean out the entire disk cache
-        persistClear();
-
-        count = 0;
-        recordModification(tab);
-    }
-
-    /**
-     * Returns a shallow copy of this.
-     * <tt>AbstractConcurrentReadCache</tt> instance: the keys and
-     * values themselves are not cloned.
-     *
-     * @return a shallow copy of this map.
-     */
-    public synchronized Object clone() {
-        try {
-            AbstractConcurrentReadCache t = (AbstractConcurrentReadCache) super.clone();
-            t.keySet = null;
-            t.entrySet = null;
-            t.values = null;
-
-            Entry[] tab = table;
-            t.table = new Entry[tab.length];
-
-            Entry[] ttab = t.table;
-
-            for (int i = 0; i < tab.length; ++i) {
-                Entry first = tab[i];
-
-                if (first != null) {
-                    ttab[i] = (Entry) (first.clone());
-                }
-            }
-
-            return t;
-        } catch (CloneNotSupportedException e) {
-            // this shouldn't happen, since we are Cloneable
-            throw new InternalError();
-        }
-    }
-
-    /**
-     * Tests if some key maps into the specified value in this table.
-     * This operation is more expensive than the <code>containsKey</code>
-     * method.<p>
-     *
-     * Note that this method is identical in functionality to containsValue,
-     * (which is part of the Map interface in the collections framework).
-     *
-     * @param      value   a value to search for.
-     * @return     <code>true</code> if and only if some key maps to the
-     *             <code>value</code> argument in this table as
-     *             determined by the <tt>equals</tt> method;
-     *             <code>false</code> otherwise.
-     * @exception  NullPointerException  if the value is <code>null</code>.
-     * @see        #containsKey(Object)
-     * @see        #containsValue(Object)
-     * @see           Map
-     */
-    public boolean contains(Object value) {
-        return containsValue(value);
-    }
-
-    /**
-     * Tests if the specified object is a key in this table.
-     *
-     * @param   key   possible key.
-     * @return  <code>true</code> if and only if the specified object
-     *          is a key in this table, as determined by the
-     *          <tt>equals</tt> method; <code>false</code> otherwise.
-     * @exception  NullPointerException  if the key is
-     *               <code>null</code>.
-     * @see     #contains(Object)
-     */
-    public boolean containsKey(Object key) {
-        return get(key) != null;
-
-        /** OpenSymphony BEGIN */
-
-        // TODO: Also check the persistence?
-
-        /** OpenSymphony END */
-    }
-
-    /**
-     * Returns <tt>true</tt> if this map maps one or more keys to the
-     * specified value. Note: This method requires a full internal
-     * traversal of the hash table, and so is much slower than
-     * method <tt>containsKey</tt>.
-     *
-     * @param value value whose presence in this map is to be tested.
-     * @return <tt>true</tt> if this map maps one or more keys to the
-     * specified value.
-     * @exception  NullPointerException  if the value is <code>null</code>.
-     */
-    public boolean containsValue(Object value) {
-        if (value == null) {
-            throw new NullPointerException();
-        }
-
-        Entry[] tab = getTableForReading();
-
-        for (int i = 0; i < tab.length; ++i) {
-            for (Entry e = tab[i]; e != null; e = e.next) {
-                Object v = e.value;
-
-                if ((v != null) && value.equals(v)) {
-                    return true;
-                }
-            }
-        }
-
-        return false;
-    }
-
-    /**
-     * Returns an enumeration of the values in this table.
-     * Use the Enumeration methods on the returned object to fetch the elements
-     * sequentially.
-     *
-     * @return  an enumeration of the values in this table.
-     * @see     java.util.Enumeration
-     * @see     #keys()
-     * @see        #values()
-     * @see        Map
-     */
-    public Enumeration elements() {
-        return new ValueIterator();
-    }
-
-    /**
-     * Returns a collection view of the mappings contained in this map.
-     * Each element in the returned collection is a <tt>Map.Entry</tt>.  The
-     * collection is backed by the map, so changes to the map are reflected in
-     * the collection, and vice-versa.  The collection supports element
-     * removal, which removes the corresponding mapping from the map, via the
-     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
-     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
-     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
-     *
-     * @return a collection view of the mappings contained in this map.
-     */
-    public Set entrySet() {
-        Set es = entrySet;
-
-        if (es != null) {
-            return es;
-        } else {
-            return entrySet = new AbstractSet() {
-                        public Iterator iterator() {
-                            return new HashIterator();
-                        }
-
-                        public boolean contains(Object o) {
-                            if (!(o instanceof Map.Entry)) {
-                                return false;
-                            }
-
-                            Map.Entry entry = (Map.Entry) o;
-                            Object key = entry.getKey();
-                            Object v = AbstractConcurrentReadCache.this.get(key);
-
-                            return (v != null) && v.equals(entry.getValue());
-                        }
-
-                        public boolean remove(Object o) {
-                            if (!(o instanceof Map.Entry)) {
-                                return false;
-                            }
-
-                            return AbstractConcurrentReadCache.this.findAndRemoveEntry((Map.Entry) o);
-                        }
-
-                        public int size() {
-                            return AbstractConcurrentReadCache.this.size();
-                        }
-
-                        public void clear() {
-                            AbstractConcurrentReadCache.this.clear();
-                        }
-                    };
-        }
-    }
-
-    /**
-     * Returns the value to which the specified key is mapped in this table.
-     *
-     * @param   key   a key in the table.
-     * @return  the value to which the key is mapped in this table;
-     *          <code>null</code> if the key is not mapped to any value in
-     *          this table.
-     * @exception  NullPointerException  if the key is
-     *               <code>null</code>.
-     * @see     #put(Object, Object)
-     */
-    public Object get(Object key) {
-        if (log.isDebugEnabled()) {
-            log.debug("get called (key=" + key + ")");
-        }
-
-        // throw null pointer exception if key null
-        int hash = hash(key);
-
-        /*
-           Start off at the apparently correct bin.  If entry is found, we
-           need to check after a barrier anyway.  If not found, we need a
-           barrier to check if we are actually in right bin. So either
-           way, we encounter only one barrier unless we need to retry.
-           And we only need to fully synchronize if there have been
-           concurrent modifications.
-        */
-        Entry[] tab = table;
-        int index = hash & (tab.length - 1);
-        Entry first = tab[index];
-        Entry e = first;
-
-        for (;;) {
-            if (e == null) {
-                // If key apparently not there, check to
-                // make sure this was a valid read
-                tab = getTableForReading();
-
-                if (first == tab[index]) {
-                    /** OpenSymphony BEGIN */
-
-                    /* Previous code
-                    return null;*/
-
-                    // Not in the table, try persistence
-                    Object value = persistRetrieve(key);
-
-                    if (value != null) {
-                        // Update the map, but don't persist the data
-                        put(key, value, false);
-                    }
-
-                    return value;
-
-                    /** OpenSymphony END */
-                } else {
-                    // Wrong list -- must restart traversal at new first
-                    e = first = tab[index = hash & (tab.length - 1)];
-                }
-            }
-            // checking for pointer equality first wins in most applications
-            else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
-                Object value = e.value;
-
-                if (value != null) {
-                    /** OpenSymphony BEGIN */
-
-                    /* Previous code
-                    return value;*/
-                    if (NULL.equals(value)) {
-                        // Memory cache disable, use disk
-                        value = persistRetrieve(e.key);
-
-                        if (value != null) {
-                            itemRetrieved(key);
-                        }
-
-                        return value; // fix [CACHE-13]
-                    } else {
-                        itemRetrieved(key);
-
-                        return value;
-                    }
-
-                    /** OpenSymphony END */
-                }
-
-                // Entry was invalidated during deletion. But it could
-                // have been re-inserted, so we must retraverse.
-                // To avoid useless contention, get lock to wait out modifications
-                // before retraversing.
-                synchronized (this) {
-                    tab = table;
-                }
-
-                e = first = tab[index = hash & (tab.length - 1)];
-            } else {
-                e = e.next;
-            }
-        }
-    }
-
-    /**
-     * Returns a set view of the keys contained in this map.
-     * The set is backed by the map, so changes to the map are reflected in the set, and
-     * vice-versa.  The set supports element removal, which removes the
-     * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
-     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
-     * <tt>clear</tt> operations.  It does not support the <tt>add</tt> or
-     * <tt>addAll</tt> operations.
-     *
-     * @return a set view of the keys contained in this map.
-     */
-    public Set keySet() {
-        Set ks = keySet;
-
-        if (ks != null) {
-            return ks;
-        } else {
-            return keySet = new AbstractSet() {
-                        public Iterator iterator() {
-                            return new KeyIterator();
-                        }
-
-                        public int size() {
-                            return AbstractConcurrentReadCache.this.size();
-                        }
-
-                        public boolean contains(Object o) {
-                            return AbstractConcurrentReadCache.this.containsKey(o);
-                        }
-
-                        public boolean remove(Object o) {
-                            return AbstractConcurrentReadCache.this.remove(o) != null;
-                        }
-
-                        public void clear() {
-                            AbstractConcurrentReadCache.this.clear();
-                        }
-                    };
-        }
-    }
-
-    /**
-     * Returns an enumeration of the keys in this table.
-     *
-     * @return  an enumeration of the keys in this table.
-     * @see     Enumeration
-     * @see     #elements()
-     * @see        #keySet()
-     * @see        Map
-     */
-    public Enumeration keys() {
-        return new KeyIterator();
-    }
-
-    /**
-     * Return the load factor
-     **/
-    public float loadFactor() {
-        return loadFactor;
-    }
-
-    /**
-     * Maps the specified <code>key</code> to the specified <code>value</code> in this table.
-     * Neither the key nor the
-     * value can be <code>null</code>. <p>
-     *
-     * The value can be retrieved by calling the <code>get</code> method
-     * with a key that is equal to the original key.
-     *
-     * @param      key     the table key.
-     * @param      value   the value.
-     * @return     the previous value of the specified key in this table,
-     *             or <code>null</code> if it did not have one.
-     * @exception  NullPointerException  if the key or value is
-     *               <code>null</code>.
-     * @see     Object#equals(Object)
-     * @see     #get(Object)
-     */
-    /** OpenSymphony BEGIN */
-    public Object put(Object key, Object value) {
-        // Call the internal put using persistance
-        return put(key, value, true);
-    }
-
-    /**
-     * Copies all of the mappings from the specified map to this one.
-     *
-     * These mappings replace any mappings that this map had for any of the
-     * keys currently in the specified Map.
-     *
-     * @param t Mappings to be stored in this map.
-     */
-    public synchronized void putAll(Map t) {
-        for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
-            Map.Entry entry = (Map.Entry) it.next();
-            Object key = entry.getKey();
-            Object value = entry.getValue();
-            put(key, value);
-        }
-    }
-
-    /**
-     * Removes the key (and its corresponding value) from this table.
-     * This method does nothing if the key is not in the table.
-     *
-     * @param   key   the key that needs to be removed.
-     * @return  the value to which the key had been mapped in this table,
-     *          or <code>null</code> if the key did not have a mapping.
-     * @exception  NullPointerException  if the key is
-     *               <code>null</code>.
-     */
-    /** OpenSymphony BEGIN */
-    public Object remove(Object key) {
-        return remove(key, true);
-    }
-
-    /**
-     * Returns the total number of cache entries held in this map.
-     *
-     * @return the number of key-value mappings in this map.
-     */
-    public synchronized int size() {
-        return count;
-    }
-
-    /**
-     * Returns a collection view of the values contained in this map.
-     * The collection is backed by the map, so changes to the map are reflected in
-     * the collection, and vice-versa.  The collection supports element
-     * removal, which removes the corresponding mapping from this map, via the
-     * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
-     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
-     * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
-     *
-     * @return a collection view of the values contained in this map.
-     */
-    public Collection values() {
-        Collection vs = values;
-
-        if (vs != null) {
-            return vs;
-        } else {
-            return values = new AbstractCollection() {
-                        public Iterator iterator() {
-                            return new ValueIterator();
-                        }
-
-                        public int size() {
-                            return AbstractConcurrentReadCache.this.size();
-                        }
-
-                        public boolean contains(Object o) {
-                            return AbstractConcurrentReadCache.this.containsValue(o);
-                        }
-
-                        public void clear() {
-                            AbstractConcurrentReadCache.this.clear();
-                        }
-                    };
-        }
-    }
-
-    /**
-     * Get ref to group.
-     * CACHE-127 Synchronized copying of the group entry set since
-     * the new HashSet(Collection c) constructor uses the iterator.
-     * This may slow things down but it is better than a
-     * ConcurrentModificationException.  We might have to revisit the
-     * code if performance is too adversely impacted.
-     **/
-    protected synchronized final Set getGroupForReading(String groupName) {
-        Set group = (Set) getGroupsForReading().get(groupName);
-        return new HashSet(group);
-    }
-
-    /**
-     * Get ref to groups.
-     * The reference and the cells it
-     * accesses will be at least as fresh as from last
-     * use of barrierLock
-     **/
-    protected final Map getGroupsForReading() {
-        synchronized (barrierLock) {
-            return groups;
-        }
-    }
-
-    /**
-     * Get ref to table; the reference and the cells it
-     * accesses will be at least as fresh as from last
-     * use of barrierLock
-     **/
-    protected final Entry[] getTableForReading() {
-        synchronized (barrierLock) {
-            return table;
-        }
-    }
-
-    /**
-     * Force a memory synchronization that will cause
-     * all readers to see table. Call only when already
-     * holding main synch lock.
-     **/
-    protected final void recordModification(Object x) {
-        synchronized (barrierLock) {
-            lastWrite = x;
-        }
-    }
-
-    /**
-     * Helper method for entrySet remove.
-     **/
-    protected synchronized boolean findAndRemoveEntry(Map.Entry entry) {
-        Object key = entry.getKey();
-        Object v = get(key);
-
-        if ((v != null) && v.equals(entry.getValue())) {
-            remove(key);
-
-            return true;
-        } else {
-            return false;
-        }
-    }
-
-    /**
-     * Remove an object from the persistence.
-     * @param key The key of the object to remove
-     */
-    protected void persistRemove(Object key) {
-        if (log.isDebugEnabled()) {
-            log.debug("PersistRemove called (key=" + key + ")");
-        }
-
-        if (persistenceListener != null) {
-            try {
-                persistenceListener.remove((String) key);
-            } catch (CachePersistenceException e) {
-                log.error("[oscache] Exception removing cache entry with key '" + key + "' from persistence", e);
-            }
-        }
-    }
-
-    /**
-     * Removes a cache group using the persistence listener.
-     * @param groupName The name of the group to remove
-     */
-    protected void persistRemoveGroup(String groupName) {
-        if (log.isDebugEnabled()) {
-            log.debug("persistRemoveGroup called (groupName=" + groupName + ")");
-        }
-
-        if (persistenceListener != null) {
-            try {
-                persistenceListener.removeGroup(groupName);
-            } catch (CachePersistenceException e) {
-                log.error("[oscache] Exception removing group " + groupName, e);
-            }
-        }
-    }
-
-    /**
-     * Retrieve an object from the persistence listener.
-     * @param key The key of the object to retrieve
-     */
-    protected Object persistRetrieve(Object key) {
-        if (log.isDebugEnabled()) {
-            log.debug("persistRetrieve called (key=" + key + ")");
-        }
-
-        Object entry = null;
-
-        if (persistenceListener != null) {
-            try {
-                entry = persistenceListener.retrieve((String) key);
-            } catch (CachePersistenceException e) {
-                /**
-                 * It is normal that we get an exception occasionally.
-                 * It happens when the item is invalidated (written or removed)
-                 * during read. The logic is constructed so that read is retried.
-                 */
-            }
-        }
-
-        return entry;
-    }
-
-    /**
-     * Retrieves a cache group using the persistence listener.
-     * @param groupName The name of the group to retrieve
-     */
-    protected Set persistRetrieveGroup(String groupName) {
-        if (log.isDebugEnabled()) {
-            log.debug("persistRetrieveGroup called (groupName=" + groupName + ")");
-        }
-
-        if (persistenceListener != null) {
-            try {
-                return persistenceListener.retrieveGroup(groupName);
-            } catch (CachePersistenceException e) {
-                log.error("[oscache] Exception retrieving group " + groupName, e);
-            }
-        }
-
-        return null;
-    }
-
-    /**
-     * Store an object in the cache using the persistence listener.
-     * @param key The object key
-     * @param obj The object to store
-     */
-    protected void persistStore(Object key, Object obj) {