Commits

Anonymous committed da93a1c

replace measureDeep in key cache with custom calculation
patch by Vijay; reviewed by Jonathan Ellis for CASSANDRA-4860

Comments (0)

Files changed (15)

src/java/org/apache/cassandra/cache/CacheKey.java

 
 import org.apache.cassandra.utils.Pair;
 
-public interface CacheKey
+public interface CacheKey extends IMeasurableMemory
 {
     /**
      * @return The keyspace and ColumnFamily names to which this key belongs

src/java/org/apache/cassandra/cache/ConcurrentLinkedHashCache.java

 
 import java.util.Set;
 
-import org.github.jamm.MemoryMeter;
-
 import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
 import com.googlecode.concurrentlinkedhashmap.EntryWeigher;
 
 /** Wrapper so CLHM can implement ICache interface.
  *  (this is what you get for making library classes final.) */
-public class ConcurrentLinkedHashCache<K, V> implements ICache<K, V>
+public class ConcurrentLinkedHashCache<K extends IMeasurableMemory, V extends IMeasurableMemory> implements ICache<K, V>
 {
     public static final int DEFAULT_CONCURENCY_LEVEL = 64;
     private final ConcurrentLinkedHashMap<K, V> map;
-    private static final MemoryMeter meter = new MemoryMeter().omitSharedBufferOverhead();
 
     private ConcurrentLinkedHashCache(ConcurrentLinkedHashMap<K, V> map)
     {
     /**
      * Initialize a cache with initial capacity with weightedCapacity
      */
-    public static <K, V> ConcurrentLinkedHashCache<K, V> create(long weightedCapacity, EntryWeigher<K, V> entryWeiger)
+    public static <K extends IMeasurableMemory, V extends IMeasurableMemory> ConcurrentLinkedHashCache<K, V> create(long weightedCapacity, EntryWeigher<K, V> entryWeiger)
     {
         ConcurrentLinkedHashMap<K, V> map = new ConcurrentLinkedHashMap.Builder<K, V>()
                                             .weigher(entryWeiger)
         return new ConcurrentLinkedHashCache<K, V>(map);
     }
 
-    public static <K, V> ConcurrentLinkedHashCache<K, V> create(long weightedCapacity)
+    public static <K extends IMeasurableMemory, V extends IMeasurableMemory> ConcurrentLinkedHashCache<K, V> create(long weightedCapacity)
     {
         return create(weightedCapacity, new EntryWeigher<K, V>()
         {
             public int weightOf(K key, V value)
             {
-                long size = meter.measureDeep(key) + meter.measureDeep(value);
+                long size = key.memorySize() + value.memorySize();
                 assert size < Integer.MAX_VALUE : "Serialized size cannot be more than 2GB/Integer.MAX_VALUE";
                 return (int) size;
             }

src/java/org/apache/cassandra/cache/IMeasurableMemory.java

+package org.apache.cassandra.cache;
+
+/**
+ * Implemented by objects that can report their own heap footprint cheaply,
+ * so cache entry weighers can avoid reflection-based deep measurement
+ * (the MemoryMeter.measureDeep this commit replaces).
+ */
+public interface IMeasurableMemory
+{
+    /**
+     * @return approximate number of bytes this object occupies on the heap.
+     */
+    public long memorySize();
+}

src/java/org/apache/cassandra/cache/IRowCacheEntry.java

  */
 package org.apache.cassandra.cache;
 
-public interface IRowCacheEntry
+public interface IRowCacheEntry extends IMeasurableMemory
 {
 }

src/java/org/apache/cassandra/cache/KeyCacheKey.java

 
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.Pair;
 
 public class KeyCacheKey implements CacheKey
         return String.format("KeyCacheKey(%s, %s)", desc, ByteBufferUtil.bytesToHex(ByteBuffer.wrap(key)));
     }
 
+    /**
+     * Approximate heap size of this key: one reference plus the key byte[]
+     * (its reference and array storage), wrapped in object header/padding.
+     * NOTE(review): getSizeWithRef(key) already includes the reference to the
+     * array, so the extra getReferenceSize() presumably accounts for the
+     * Descriptor field -- confirm against the class's field list.
+     */
+    public long memorySize()
+    {
+        long fields = ObjectSizes.getReferenceSize() + ObjectSizes.getSizeWithRef(key);
+        return ObjectSizes.getFieldSize(fields);
+    }
+
     @Override
     public boolean equals(Object o)
     {

src/java/org/apache/cassandra/cache/RowCacheKey.java

 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.Pair;
 
 public class RowCacheKey implements CacheKey, Comparable<RowCacheKey>
         return Schema.instance.getCF(cfId);
     }
 
+    /**
+     * Approximate heap size of this key: one reference plus the key byte[]
+     * (its reference and array storage), wrapped in object header/padding.
+     * NOTE(review): the extra getReferenceSize() presumably covers the cfId
+     * field; the referenced UUID itself is deliberately not counted (the unit
+     * test subtracts measureDeep(id) from the jamm measurement) -- confirm.
+     */
+    public long memorySize()
+    {
+        long fields = ObjectSizes.getReferenceSize() + ObjectSizes.getSizeWithRef(key);
+        return ObjectSizes.getFieldSize(fields);
+    }
+
     @Override
     public boolean equals(Object o)
     {

src/java/org/apache/cassandra/cache/RowCacheSentinel.java

 
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.cassandra.utils.ObjectSizes;
+
 import com.google.common.base.Objects;
 
 /**
     {
         return Objects.hashCode(sentinelId);
     }
+
+    /**
+     * Approximate heap size of this sentinel.
+     */
+    public long memorySize()
+    {
+        // Only field is the primitive long sentinelId (8 bytes);
+        // getFieldSize adds the object header and padding.
+        return ObjectSizes.getFieldSize(8);
+    }
 }

src/java/org/apache/cassandra/db/ColumnFamily.java

         return size;
     }
 
+    /**
+     * Approximate heap size of this ColumnFamily. Falls back to jamm's
+     * reflection-based deep measurement (via ObjectSizes.measureDeep) because
+     * the object graph is too complex for a hand-rolled calculation.
+     */
+    public long memorySize()
+    {
+        return ObjectSizes.measureDeep(this);
+    }
+
     public long maxTimestamp()
     {
         long maxTimestamp = deletionInfo().maxTimestamp();

src/java/org/apache/cassandra/db/DeletionInfo.java

 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.IntervalTree;
+import org.apache.cassandra.utils.ObjectSizes;
 
 public class DeletionInfo
 {
         return topLevel.equals(that.topLevel) && ranges.equals(that.ranges);
     }
 
+    /**
+     * Approximate heap size: the referenced DeletionTime (topLevel) plus the
+     * two reference fields, wrapped in this object's header/padding. The
+     * ranges interval tree is measured deeply via jamm, but only when it is
+     * non-null and non-empty -- an empty tree is presumably treated as shared
+     * and therefore free (TODO confirm that assumption holds for callers).
+     */
+    public long memorySize()
+    {
+        long fields = topLevel.memorySize() + (2 * ObjectSizes.getReferenceSize());
+        if (ranges != null && !ranges.isEmpty())
+            fields += ObjectSizes.measureDeep(ranges);
+        return ObjectSizes.getFieldSize(fields);
+    }
+
     @Override
     public final int hashCode()
     {

src/java/org/apache/cassandra/db/DeletionTime.java

 import com.google.common.base.Objects;
 
 import org.apache.cassandra.io.ISerializer;
+import org.apache.cassandra.utils.ObjectSizes;
 
 public class DeletionTime implements Comparable<DeletionTime>
 {
         return column.isMarkedForDelete() && column.getMarkedForDeleteAt() <= markedForDeleteAt;
     }
 
+    /**
+     * Approximate heap size: the two primitive fields (markedForDeleteAt,
+     * localDeletionTime) sized via TypeSizes, plus object header/padding.
+     */
+    public long memorySize()
+    {
+        long fields = TypeSizes.NATIVE.sizeof(markedForDeleteAt) + TypeSizes.NATIVE.sizeof(localDeletionTime);
+        return ObjectSizes.getFieldSize(fields);
+    }
+
     private static class Serializer implements ISerializer<DeletionTime>
     {
         public void serialize(DeletionTime delTime, DataOutput out) throws IOException

src/java/org/apache/cassandra/db/RowIndexEntry.java

 import java.util.Collections;
 import java.util.List;
 
+import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.IndexHelper;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.IFilter;
 import org.apache.cassandra.utils.FilterFactory;
+import org.apache.cassandra.utils.ObjectSizes;
 
-public class RowIndexEntry
+public class RowIndexEntry implements IMeasurableMemory
 {
     public static final Serializer serializer = new Serializer();
 
         throw new UnsupportedOperationException();
     }
 
+    /**
+     * Approximate heap size: the long position field plus one reference,
+     * wrapped in object header/padding.
+     * NOTE(review): the visible class shows only the primitive position
+     * field; what the extra getReferenceSize() accounts for is not evident
+     * from this hunk -- verify against the full field list (the unit test
+     * asserts equality with jamm's measureDeep, so it is presumably needed).
+     */
+    public long memorySize()
+    {
+        long fields = TypeSizes.NATIVE.sizeof(position) + ObjectSizes.getReferenceSize(); 
+        return ObjectSizes.getFieldSize(fields);
+    }
+
     public static class Serializer
     {
         public void serialize(RowIndexEntry rie, DataOutput dos) throws IOException
             assert size <= Integer.MAX_VALUE;
             return (int)size;
         }
+
+        /**
+         * Approximate heap size: the summed sizes of the IndexInfo elements,
+         * the backing array of the columnsIndex list (plus a 4-byte size
+         * field), and the deletion info, wrapped in object header/padding.
+         * NOTE(review): getArraySize(length, elementSize) computes
+         * header + length * elementSize, but `internal` here is already the
+         * TOTAL of all element sizes -- multiplying it by size() again looks
+         * like an overcount whenever the list has more than one entry. The
+         * unit test only exercises the empty-list case, so this would not be
+         * caught there; verify and consider
+         * getArraySize(size, referenceSize) + internal instead.
+         */
+        public long memorySize()
+        {
+            long internal = 0;
+            for (IndexHelper.IndexInfo idx : columnsIndex)
+                internal += idx.memorySize();
+            long listSize = ObjectSizes.getFieldSize(ObjectSizes.getArraySize(columnsIndex.size(), internal) + 4);
+            return ObjectSizes.getFieldSize(deletionInfo.memorySize() + listSize);
+        }
     }
 }

src/java/org/apache/cassandra/io/sstable/IndexHelper.java

         {
             return new IndexInfo(ByteBufferUtil.readWithShortLength(dis), ByteBufferUtil.readWithShortLength(dis), dis.readLong(), dis.readLong());
         }
+
+        /**
+         * Approximate heap size: the two name ByteBuffers plus the two long
+         * fields (8 + 8), wrapped in object header/padding.
+         * NOTE(review): this uses getSize rather than getSizeWithRef, so the
+         * two reference slots pointing at the buffers appear uncounted --
+         * confirm whether that is intentional (e.g. buffers treated as
+         * shared) or an undercount.
+         */
+        public long memorySize()
+        {
+            long fields = ObjectSizes.getSize(firstName) + ObjectSizes.getSize(lastName) + 8 + 8; 
+            return ObjectSizes.getFieldSize(fields);
+        }
     }
 }

src/java/org/apache/cassandra/utils/ObjectSizes.java

+package org.apache.cassandra.utils;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.nio.ByteBuffer;
+
+import org.github.jamm.MemoryMeter;
+
+/**
+ * Hand-rolled object size arithmetic, used by the caches to weigh entries
+ * without reflection.
+ *
+ * Modified version of the code from
+ * https://github.com/twitter/commons/blob/master
+ * /src/java/com/twitter/common/objectsize/ObjectSizeCalculator.java
+ *
+ * The difference is that we don't use reflection.
+ */
+public class ObjectSizes
+{
+    /** Layout constants detected once for the JVM this process runs on. */
+    public static final MemoryLayoutSpecification SPEC = getEffectiveMemoryLayoutSpecification();
+    // Reflection-based fallback for object graphs too complex to size by hand
+    // (see measureDeep below).
+    private static final MemoryMeter meter = new MemoryMeter().omitSharedBufferOverhead();
+
+    /**
+     * Describes constant memory overheads for various constructs in a JVM
+     * implementation.
+     */
+    public interface MemoryLayoutSpecification
+    {
+        int getArrayHeaderSize();
+
+        int getObjectHeaderSize();
+
+        int getObjectPadding();
+
+        int getReferenceSize();
+
+        int getSuperclassFieldPadding();
+    }
+
+    /**
+     * @param fieldsSize total size in bytes of an object's instance fields
+     * @return the object's heap size: header plus fields, rounded up to the
+     *         JVM's object padding boundary
+     */
+    public static long getFieldSize(long fieldsSize)
+    {
+        return roundTo(SPEC.getObjectHeaderSize() + fieldsSize, SPEC.getObjectPadding());
+    }
+
+    /**
+     * @return the size contributed by fields declared in a superclass,
+     *         rounded to the superclass field padding (no additional header).
+     */
+    public static long getSuperClassFieldSize(long fieldsSize)
+    {
+        return roundTo(fieldsSize, SPEC.getSuperclassFieldPadding());
+    }
+
+    /**
+     * @return heap size of an array of {@code length} elements of
+     *         {@code elementSize} bytes each, including array header and padding.
+     */
+    public static long getArraySize(int length, long elementSize)
+    {
+        return roundTo(SPEC.getArrayHeaderSize() + length * elementSize, SPEC.getObjectPadding());
+    }
+
+    /** @return heap size of the byte[] plus one reference slot pointing at it. */
+    public static long getSizeWithRef(byte[] bytes)
+    {
+        return SPEC.getReferenceSize() + getArraySize(bytes.length, 1);
+    }
+
+    /**
+     * @return heap size of a heap ByteBuffer and its backing array (sized by
+     *         capacity), excluding any reference to the buffer itself.
+     */
+    public static long getSize(ByteBuffer buffer)
+    {
+        long size = 0;
+        /* BB Class */
+        // final byte[] hb;
+        // final int offset;
+        // boolean isReadOnly;
+        size += ObjectSizes.getFieldSize(1L + 4 + ObjectSizes.getReferenceSize() + ObjectSizes.getArraySize(buffer.capacity(), 1));
+        /* Super Class */
+        // private int mark;
+        // private int position;
+        // private int limit;
+        // private int capacity;
+        // NOTE(review): the four ints account for 16 bytes; the extra 8 here
+        // presumably covers java.nio.Buffer's long 'address' field -- confirm.
+        size += ObjectSizes.getSuperClassFieldSize(4L + 4 + 4 + 4 + 8);
+        return size;
+    }
+
+    /** @return getSize(buffer) plus one reference slot pointing at the buffer. */
+    public static long getSizeWithRef(ByteBuffer buffer)
+    {
+        return SPEC.getReferenceSize() + getSize(buffer);
+    }
+
+    /** Rounds {@code x} up to the nearest multiple of {@code multiple}. */
+    public static long roundTo(long x, int multiple)
+    {
+        return ((x + multiple - 1) / multiple) * multiple;
+    }
+
+    /** @return bytes per object reference on this JVM (4 with compressed oops, 8 otherwise). */
+    public static int getReferenceSize()
+    {
+        return SPEC.getReferenceSize();
+    }
+
+    /**
+     * Detects the running JVM's memory layout: 32-bit data model, 64-bit with
+     * compressed oops (HotSpot VM version 17+ with under 30GB across all
+     * memory pools), or 64-bit with uncompressed oops as the worst case.
+     */
+    private static MemoryLayoutSpecification getEffectiveMemoryLayoutSpecification()
+    {
+        final String dataModel = System.getProperty("sun.arch.data.model");
+        if ("32".equals(dataModel))
+        {
+            // Running with 32-bit data model
+            return new MemoryLayoutSpecification()
+            {
+                public int getArrayHeaderSize()
+                {
+                    return 12;
+                }
+
+                public int getObjectHeaderSize()
+                {
+                    return 8;
+                }
+
+                public int getObjectPadding()
+                {
+                    return 8;
+                }
+
+                public int getReferenceSize()
+                {
+                    return 4;
+                }
+
+                public int getSuperclassFieldPadding()
+                {
+                    return 4;
+                }
+            };
+        }
+
+        final String strVmVersion = System.getProperty("java.vm.version");
+        final int vmVersion = Integer.parseInt(strVmVersion.substring(0, strVmVersion.indexOf('.')));
+        if (vmVersion >= 17)
+        {
+            long maxMemory = 0;
+            for (MemoryPoolMXBean mp : ManagementFactory.getMemoryPoolMXBeans())
+            {
+                maxMemory += mp.getUsage().getMax();
+            }
+            if (maxMemory < 30L * 1024 * 1024 * 1024)
+            {
+                // HotSpot 17.0 and above use compressed OOPs below 30GB of RAM
+                // total for all memory pools (yes, including code cache).
+                return new MemoryLayoutSpecification()
+                {
+                    public int getArrayHeaderSize()
+                    {
+                        return 16;
+                    }
+
+                    public int getObjectHeaderSize()
+                    {
+                        return 12;
+                    }
+
+                    public int getObjectPadding()
+                    {
+                        return 8;
+                    }
+
+                    public int getReferenceSize()
+                    {
+                        return 4;
+                    }
+
+                    public int getSuperclassFieldPadding()
+                    {
+                        return 4;
+                    }
+                };
+            }
+        }
+
+        /* Worst case we over count. */
+
+        // In other cases, it's a 64-bit uncompressed OOPs object model
+        return new MemoryLayoutSpecification()
+        {
+            public int getArrayHeaderSize()
+            {
+                return 24;
+            }
+
+            public int getObjectHeaderSize()
+            {
+                return 16;
+            }
+
+            public int getObjectPadding()
+            {
+                return 8;
+            }
+
+            public int getReferenceSize()
+            {
+                return 8;
+            }
+
+            public int getSuperclassFieldPadding()
+            {
+                return 8;
+            }
+        };
+    }
+
+    /**
+     * Deep-measures an arbitrary object graph with jamm's MemoryMeter; kept
+     * for cases where a manual calculation is impractical (e.g. ColumnFamily).
+     */
+    public static long measureDeep(Object pojo)
+    {
+        return meter.measureDeep(pojo);
+    }
+}

test/unit/org/apache/cassandra/cache/CacheProviderTest.java

 
 public class CacheProviderTest extends SchemaLoader
 {
-    String key1 = "key1";
-    String key2 = "key2";
-    String key3 = "key3";
-    String key4 = "key4";
-    String key5 = "key5";
+    MeasureableString key1 = new MeasureableString("key1");
+    MeasureableString key2 = new MeasureableString("key2");
+    MeasureableString key3 = new MeasureableString("key3");
+    MeasureableString key4 = new MeasureableString("key4");
+    MeasureableString key5 = new MeasureableString("key5");
     private static final long CAPACITY = 4;
     private String tableName = "Keyspace1";
     private String cfName = "Standard1";
 
-    private void simpleCase(ColumnFamily cf, ICache<String, IRowCacheEntry> cache)
+    private void simpleCase(ColumnFamily cf, ICache<MeasureableString, IRowCacheEntry> cache)
     {
         cache.put(key1, cf);
         assert cache.get(key1) != null;
     }
 
     // TODO this isn't terribly useful
-    private void concurrentCase(final ColumnFamily cf, final ICache<String, IRowCacheEntry> cache) throws InterruptedException
+    private void concurrentCase(final ColumnFamily cf, final ICache<MeasureableString, IRowCacheEntry> cache) throws InterruptedException
     {
         Runnable runable = new Runnable()
         {
     @Test
     public void testHeapCache() throws InterruptedException
     {
-        ICache<String, IRowCacheEntry> cache = ConcurrentLinkedHashCache.create(CAPACITY, Weighers.<String, IRowCacheEntry>entrySingleton());
+        ICache<MeasureableString, IRowCacheEntry> cache = ConcurrentLinkedHashCache.create(CAPACITY, Weighers.<MeasureableString, IRowCacheEntry>entrySingleton());
         ColumnFamily cf = createCF();
         simpleCase(cf, cache);
         concurrentCase(cf, cache);
     @Test
     public void testSerializingCache() throws InterruptedException
     {
-        ICache<String, IRowCacheEntry> cache = SerializingCache.create(CAPACITY, Weighers.<RefCountedMemory>singleton(), new SerializingCacheProvider.RowCacheSerializer());
+        ICache<MeasureableString, IRowCacheEntry> cache = SerializingCache.create(CAPACITY, Weighers.<RefCountedMemory>singleton(), new SerializingCacheProvider.RowCacheSerializer());
         ColumnFamily cf = createCF();
         simpleCase(cf, cache);
         concurrentCase(cf, cache);
         assertNotSame(key1, key3);
         assertNotSame(key1.hashCode(), key3.hashCode());
     }
+
+    /**
+     * Test stand-in key: wraps a String and reports its length as the
+     * "memory size". This is a cache weight for testing only, not a real
+     * heap measurement.
+     */
+    private class MeasureableString implements IMeasurableMemory
+    {
+        public final String string;
+
+        public MeasureableString(String input)
+        {
+            this.string = input;
+        }
+
+        // Character count, not bytes -- sufficient as a deterministic weight.
+        public long memorySize()
+        {
+            return string.length();
+        }
+    }
 }

test/unit/org/apache/cassandra/cache/ObjectSizeTest.java

+package org.apache.cassandra.cache;
+
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import junit.framework.Assert;
+
+import org.apache.cassandra.db.DeletionInfo;
+import org.apache.cassandra.db.RowIndexEntry;
+import org.apache.cassandra.utils.ObjectSizes;
+import org.github.jamm.MemoryMeter;
+import org.junit.Test;
+
+/**
+ * Verifies that the hand-rolled size calculations in ObjectSizes and the
+ * memorySize() implementations agree with jamm's reflection-based deep
+ * measurement on the same JVM.
+ */
+public class ObjectSizeTest
+{
+    // Same configuration the caches previously used, so the expected values match.
+    public static final MemoryMeter meter = new MemoryMeter().omitSharedBufferOverhead();
+
+    // Empty byte[] should match jamm exactly (header + padding only).
+    @Test
+    public void testArraySizes()
+    {
+        long size = ObjectSizes.getArraySize(0, 1);
+        long size2 = meter.measureDeep(new byte[0]);
+        Assert.assertEquals(size, size2);
+    }
+
+    // Non-empty byte[] exercises the length * elementSize term as well.
+    @Test
+    public void testBiggerArraySizes()
+    {
+        long size = ObjectSizes.getArraySize(0, 1);
+        long size2 = meter.measureDeep(new byte[0]);
+        Assert.assertEquals(size, size2);
+
+        size = ObjectSizes.getArraySize(8, 1);
+        size2 = meter.measureDeep(new byte[8]);
+        Assert.assertEquals(size, size2);
+    }
+
+    // Null Descriptor keeps the comparison limited to the key's own fields.
+    @Test
+    public void testKeyCacheKey()
+    {
+        KeyCacheKey key = new KeyCacheKey(null, ByteBuffer.wrap(new byte[0]));
+        long size = key.memorySize();
+        long size2 = meter.measureDeep(key);
+        Assert.assertEquals(size, size2);
+    }
+
+    @Test
+    public void testKeyCacheValue()
+    {
+        RowIndexEntry entry = new RowIndexEntry(123);
+        long size = entry.memorySize();
+        long size2 = meter.measureDeep(entry);
+        Assert.assertEquals(size, size2);
+    }
+
+    // Exercises the IndexedEntry path; note the columns index list is empty here.
+    @Test
+    public void testKeyCacheValueWithDelInfo()
+    {
+        RowIndexEntry entry = RowIndexEntry.create(123, new DeletionInfo(123, 123), null);
+        long size = entry.memorySize();
+        long size2 = meter.measureDeep(entry);
+        Assert.assertEquals(size, size2);
+    }
+
+    // RowCacheKey.memorySize() does not count the referenced UUID, so subtract
+    // its deep size from the jamm measurement before comparing.
+    @Test
+    public void testRowCacheKey()
+    {
+        UUID id = UUID.randomUUID();
+        RowCacheKey key = new RowCacheKey(id, ByteBuffer.wrap(new byte[11]));
+        long size = key.memorySize();
+        long size2 = meter.measureDeep(key) - meter.measureDeep(id);
+        Assert.assertEquals(size, size2);
+    }
+
+    @Test
+    public void testRowCacheSentinel()
+    {
+        RowCacheSentinel sentinel = new RowCacheSentinel(123);
+        long size = sentinel.memorySize();
+        long size2 = meter.measureDeep(sentinel);
+        Assert.assertEquals(size, size2);
+    }
+}
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.