2012-09-19 67 views
0

I am using memcached 1.4.7 and spymemcached 2.8.4 as the client to set and get key values. When used in a multithreaded, high-load environment, the spymemcached client fails to set some of the values in the cache.

I am running my load test program with 40 million long keys, divided equally among the worker threads: each worker thread tries to set 1M keys in the cache, so there are 40 worker threads running.

In my DefaultCache.java I create a connection pool of 20 spymemcached clients. Whenever a worker thread tries to set a key into the cache, DefaultCache.java returns a randomly chosen client, as shown in the getCache() method.

When my program exits it prints

Total no of keys loaded = 40000000

But when I check the memcached telnet console, it is always missing a few thousand records. I also verified this by fetching a few random keys, which returned null. There are no evictions, and cmd_set, curr_items and total_items are each equal to 39.5M.

What could be the reason behind these keys missing from the cache?

Here is the code for reference.

public class TestCacheLoader { 
public static final Long TOTAL_RECORDS = 40000000L; 
public static final Long LIMIT = 1000000L; 

public static void main(String[] args) { 
    long keyCount = loadKeyCacheData(); 
    System.out.println("Total no of keys loaded = " + keyCount); 
} 

public static long loadKeyCacheData() { 
    DefaultCache cache = new DefaultCache(); 
    List<Future<Long>> futureList = new ArrayList<Future<Long>>(); 
    ExecutorService executorThread = Executors.newFixedThreadPool(40); 
    long offset = 0; 
    long keyCount = 0; 
    long workerCount = 0; 
    try { 
     do { 
      List<Long> keyList = new ArrayList<Long>(LIMIT.intValue()); 
      for (long counter = offset; counter < (offset + LIMIT) && counter < TOTAL_RECORDS; counter++) { 
       keyList.add(counter); 
      } 
      if (keyList.size() != 0) { 
       System.out.println("Initiating a new worker thread " + workerCount++); 
       KeyCacheThread keyCacheThread = new KeyCacheThread(keyList, cache); 
       futureList.add(executorThread.submit(keyCacheThread)); 
      } 
      offset += LIMIT; 
     } while (offset < TOTAL_RECORDS); 
     for (Future<Long> future : futureList) { 
      keyCount += (Long) future.get(); 
     } 
    } catch (Exception e) { 
     e.printStackTrace(); 
    } finally { 
     cache.shutdown(); 
    } 
    return keyCount; 
} 

}

class KeyCacheThread implements Callable<Long> { 
private List<Long> keyList; 
private DefaultCache cache; 

public KeyCacheThread(List<Long> keyList, DefaultCache cache) { 
    this.keyList = keyList; 
    this.cache = cache; 
} 

public Long call() { 
    return createKeyCache(); 
} 

public Long createKeyCache() { 
    String compoundKey = ""; 
    long keyCounter = 0; 
    System.out.println(Thread.currentThread() + " started to process " + keyList.size() + " keys"); 
    for (Long key : keyList) { 
     keyCounter++; 
     compoundKey = key.toString(); 
     cache.set(compoundKey, 0, key); 
    } 
    System.out.println(Thread.currentThread() + " processed = " + keyCounter + " keys"); 
    return keyCounter; 
} 

}

public class DefaultCache { 
private static final Logger LOGGER = Logger.getLogger(DefaultCache.class); 

private MemcachedClient[] clients; 

// cache configuration, assigned in the constructor below 
private String cacheNamespace; 
private String cacheName; 
private String addresses; 
private int cacheLookupTimeout; 
private int numberOfClients; 

public DefaultCache() { 
    this.cacheNamespace = ""; 
    this.cacheName = "keyCache"; 
    this.addresses = "127.0.0.1:11211"; 
    this.cacheLookupTimeout = 3000; 
    this.numberOfClients = 20; 

    try { 
     LOGGER.debug("Cache initialization started for the cache : " + cacheName); 
     ConnectionFactory connectionFactory = new DefaultConnectionFactory(DefaultConnectionFactory.DEFAULT_OP_QUEUE_LEN, 
       DefaultConnectionFactory.DEFAULT_READ_BUFFER_SIZE, DefaultHashAlgorithm.KETAMA_HASH) { 
      public NodeLocator createLocator(List<MemcachedNode> list) { 
       KetamaNodeLocator locator = new KetamaNodeLocator(list, DefaultHashAlgorithm.KETAMA_HASH); 
       return locator; 
      } 
     }; 

     clients = new MemcachedClient[numberOfClients]; 

     for (int i = 0; i < numberOfClients; i++) { 
      MemcachedClient client = new MemcachedClient(connectionFactory, AddrUtil.getAddresses(addresses)); // addresses already holds a "host:port" string 
      clients[i] = client; 
     } 
     LOGGER.debug("Cache initialization ended for the cache : " + cacheName); 
    } catch (IOException e) { 
     LOGGER.error("Exception occured while initializing cache : " + cacheName, e); 
     throw new CacheException("Exception occured while initializing cache : " + cacheName, e); 
    } 
} 

public Object get(String key) { 
    try { 
     return getCache().get(cacheNamespace + key); 
    } catch (Exception e) { 
     return null; 
    } 
} 

public void set(String key, Integer expiryTime, final Object value) { 
    getCache().set(cacheNamespace + key, expiryTime, value); 
} 

public Object delete(String key) { 
    return getCache().delete(cacheNamespace + key); 
} 

public void shutdown() { 
    for (MemcachedClient client : clients) { 
     client.shutdown(); 
    } 
} 

public void flush() { 
    for (MemcachedClient client : clients) { 
     client.flush(); 
    } 
} 

private MemcachedClient getCache() { 
    MemcachedClient client = null; 
    int i = (int) (Math.random() * numberOfClients); 
    client = clients[i]; 
    return client; 
} 

// Address is an application-level host/port holder (class not shown); builds a "host:port ..." string 
private String getServerAddresses(List<Address> addresses) { 
    StringBuilder addressStr = new StringBuilder(); 
    for (Address address : addresses) { 
     addressStr.append(address.getHost()).append(":").append(address.getPort()).append(" "); 
    } 
    return addressStr.toString().trim(); 
} 

}
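
One detail worth noting about the loader above: spymemcached's set() is asynchronous and returns an OperationFuture<Boolean>, so my loader only counts the keys it submits, not the keys the server acknowledges. A variant of the loop in createKeyCache that waits on each result (just a sketch to illustrate the point, assuming the worker holds a raw net.spy.memcached.MemcachedClient named client instead of going through DefaultCache, whose set() returns void) would look like this:

for (Long key : keyList) { 
    compoundKey = key.toString(); 
    // set() only queues the operation; the OperationFuture says whether memcached accepted it 
    OperationFuture<Boolean> result = client.set(compoundKey, 0, key); 
    try { 
     if (Boolean.TRUE.equals(result.get())) { 
      keyCounter++; // count only acknowledged sets 
     } else { 
      System.out.println("Set was not acknowledged for key " + compoundKey); 
     } 
    } catch (Exception e) { 
     System.out.println("Set failed for key " + compoundKey + " : " + e); 
    } 
} 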

+0

Are you sure those keys weren't just evicted from the cache? – mikewied

+0

Yes, I can see from the telnet stats command that evictions = 0 –

Answers

0

I am not sure, but it seems to be an issue with the spymemcached library itself. I changed the DefaultCache.java implementation to use xmemcached, and everything started working correctly. Now I am not missing any records, and the telnet stats show a matching number of set commands.
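
Roughly, the replacement boils down to a single xmemcached client configured with an internal connection pool instead of an array of spymemcached clients. A minimal sketch of the idea (class name, address and pool size here are just examples, not my exact configuration):

import net.rubyeye.xmemcached.MemcachedClient; 
import net.rubyeye.xmemcached.XMemcachedClientBuilder; 
import net.rubyeye.xmemcached.command.BinaryCommandFactory; 
import net.rubyeye.xmemcached.utils.AddrUtil; 

public class XMemcachedCache { 
    private final MemcachedClient client; 

    public XMemcachedCache() throws Exception { 
        XMemcachedClientBuilder builder = new XMemcachedClientBuilder(AddrUtil.getAddresses("127.0.0.1:11211")); 
        builder.setCommandFactory(new BinaryCommandFactory()); // optional: binary protocol 
        builder.setConnectionPoolSize(20); // xmemcached multiplexes over its own pool of NIO connections 
        this.client = builder.build(); 
    } 

    public void set(String key, int expiryTime, Object value) throws Exception { 
        client.set(key, expiryTime, value); // blocks until the server replies or the operation times out 
    } 

    public Object get(String key) throws Exception { 
        return client.get(key); 
    } 

    public void shutdown() throws Exception { 
        client.shutdown(); 
    } 
} 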

Thanks for your patience.

1

I see it. The reason is the reactor pattern spymemcached uses for asynchronous operations, which means one worker thread per connection. That single thread is the bottleneck on a multi-core machine under high load: one thread can keep only one CPU busy while the remaining 23 sit idle.

We came up with connection pooling, which adds worker threads and lets the client use more of the available hardware. Check out the project 3levelmemcache on GitHub.
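
Stripped down, the idea is just to spread operations across several independent spymemcached clients so that more than one I/O thread (and therefore more than one CPU) is doing work; it is essentially what the DefaultCache in the question already does, with the pool sized to the hardware. A toy round-robin version (an illustration, not the actual 3levelmemcache code):

import java.io.IOException; 
import java.util.concurrent.atomic.AtomicInteger; 
import net.spy.memcached.AddrUtil; 
import net.spy.memcached.MemcachedClient; 

public class PooledMemcache { 
    private final MemcachedClient[] clients; 
    private final AtomicInteger counter = new AtomicInteger(); 

    public PooledMemcache(String addresses, int poolSize) throws IOException { 
        clients = new MemcachedClient[poolSize]; 
        for (int i = 0; i < poolSize; i++) { 
            // every client gets its own connection and its own I/O (reactor) thread 
            clients[i] = new MemcachedClient(AddrUtil.getAddresses(addresses)); 
        } 
    } 

    private MemcachedClient next() { 
        // round-robin dispatch spreads the load evenly over the pool 
        return clients[(counter.getAndIncrement() & Integer.MAX_VALUE) % clients.length]; 
    } 

    public void set(String key, int expiry, Object value) { 
        next().set(key, expiry, value); 
    } 

    public Object get(String key) { 
        return next().get(key); 
    } 

    public void shutdown() { 
        for (MemcachedClient c : clients) { 
            c.shutdown(); 
        } 
    } 
} 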