package cache

import (
	"sync"
	"time"
)

type Item struct {
	value      interface{}
	expiration int64
}

type Cache struct {
	items             map[string]Item
	mu                sync.RWMutex
	defaultExpiration time.Duration
	cleanupInterval   time.Duration
}

func NewCache(defaultExpiration, cleanupInterval time.Duration) *Cache {
	cache := &Cache{
		items:             make(map[string]Item),
		defaultExpiration: defaultExpiration,
		cleanupInterval:   cleanupInterval,
	}
	go cache.cleanupExpired()
	return cache
}

func (c *Cache) Set(key string, value interface{}, expiration time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	var exp int64
	now := time.Now().UnixNano()
	if expiration > 0 {
		exp = now + int64(expiration)
	} else {
		exp = now + int64(c.defaultExpiration)
	}
	item := Item{
		value:      value,
		expiration: exp,
	}
	c.items[key] = item
}
func (c *Cache) Get(key string) (interface{}, bool) {
	c.mu.RLock()
	item, found := c.items[key]
	c.mu.RUnlock()
	if !found {
		return nil, false
	}
	if time.Now().UnixNano() > item.expiration {
		// The read lock must be released before taking the write lock:
		// calling Lock while still holding RLock on the same goroutine deadlocks.
		c.mu.Lock()
		// Re-check under the write lock in case another goroutine refreshed the key.
		if cur, ok := c.items[key]; ok && time.Now().UnixNano() > cur.expiration {
			delete(c.items, key)
		}
		c.mu.Unlock()
		return nil, false
	}
	return item.value, true
}
func (c *Cache) cleanupExpired() {
	for {
		time.Sleep(c.cleanupInterval)
		now := time.Now().UnixNano()
		c.mu.Lock()
		for key, item := range c.items {
			if now > item.expiration {
				delete(c.items, key)
			}
		}
		c.mu.Unlock()
	}
}
package cache

import (
	"fmt"
	"runtime"
	"testing"
	"time"

	"github.com/spf13/cast"
)

func TestCache1(t *testing.T) {
	cache := NewCache(2*time.Second, 2*time.Second)
	start := time.Now()
	for i := 1; i < 9999999; i++ {
		cache.Set(fmt.Sprintf("%d", i), cast.ToString(i), 2*time.Second)
		//if i%2 == 0 {
		//	endTime := time.Now()
		//	duration := endTime.Sub(start)
		//	if duration.Milliseconds() > 100 {
		//		fmt.Println("timeUnit", duration.Milliseconds(), "ms")
		//	}
		//	start = time.Now()
		//}
		if i%100000 == 0 {
			var m runtime.MemStats
			runtime.ReadMemStats(&m)
			fmt.Println(cast.ToString(m.Alloc/1024/1024)+"MB",
				cast.ToString(m.TotalAlloc/1024/1024)+"MB")
		}
	}
	endTime := time.Now()
	duration := endTime.Sub(start)
	fmt.Println("timeUnit", duration.Milliseconds(), "ms")
}
1551MB 1940MB
1555MB 1944MB
timeUnit 5759 ms
There is also a version based on sync.Map:
package cache

import (
	"sync"
	"time"
)

//var cacheStd = NewCache(time.Second*5, time.Second*10)

type Item struct {
	value      interface{}
	expiration int64
}

type Cache struct {
	items             sync.Map
	defaultExpiration time.Duration
	cleanupInterval   time.Duration
}

func NewCache(defaultExpiration, cleanupInterval time.Duration) *Cache {
	cache := &Cache{
		defaultExpiration: defaultExpiration,
		cleanupInterval:   cleanupInterval,
	}
	go cache.cleanupExpired()
	return cache
}

func (c *Cache) Set(key string, value interface{}, expiration time.Duration) {
	var exp int64
	now := time.Now().UnixNano()
	if expiration > 0 {
		exp = now + int64(expiration)
	} else {
		exp = now + int64(c.defaultExpiration)
	}
	item := Item{
		value:      value,
		expiration: exp,
	}
	c.items.Store(key, item)
}

func (c *Cache) Get(key string) (interface{}, bool) {
	item, found := c.items.Load(key)
	if !found {
		return nil, false
	}
	cachedItem := item.(Item)
	if time.Now().UnixNano() > cachedItem.expiration {
		c.items.Delete(key)
		return nil, false
	}
	return cachedItem.value, true
}

func (c *Cache) cleanupExpired() {
	for {
		time.Sleep(c.cleanupInterval)
		now := time.Now().UnixNano()
		c.items.Range(func(key, value interface{}) bool {
			item := value.(Item)
			if now > item.expiration {
				c.items.Delete(key)
			}
			return true
		})
	}
}

//func GetFromCache[T any](key string, action func() T) T {
//	data, ok := cacheStd.Get(key)
//	if ok {
//		return data.(T)
//	}
//	res := action()
//	cacheStd.Set(key, res, 0)
//	return res
//}
The test results are about twice as slow:
2461MB 3114MB
2473MB 3126MB
timeUnit 12503 ms
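As an aside, the standard go test benchmark harness would give steadier numbers than the hand-rolled timing loop above. A minimal sketch that works against either Go version (they expose the same API, though as posted they declare the same types and would have to live in separate packages to be compiled side by side):

package cache

import (
	"strconv"
	"testing"
	"time"
)

// Measures Set throughput and allocations; run with: go test -bench=Set -benchmem
func BenchmarkCacheSet(b *testing.B) {
	cache := NewCache(2*time.Second, 2*time.Second)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cache.Set(strconv.Itoa(i), strconv.Itoa(i), 2*time.Second)
	}
}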
The corresponding Java code:
package com.example.jtool.controller;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class Cache {
    private ConcurrentHashMap<String, Item> items;
    private long defaultExpiration;
    private long cleanupInterval;
    private ScheduledExecutorService executor;

    public Cache(long defaultExpiration, long cleanupInterval) {
        this.items = new ConcurrentHashMap<>();
        this.defaultExpiration = defaultExpiration;
        this.cleanupInterval = cleanupInterval;
        this.executor = Executors.newSingleThreadScheduledExecutor();
        this.executor.scheduleAtFixedRate(this::cleanupExpired, cleanupInterval, cleanupInterval, TimeUnit.NANOSECONDS);
    }

    public void set(String key, Object value, long expiration) {
        long exp = expiration > 0 ? System.nanoTime() + expiration : System.nanoTime() + defaultExpiration;
        Item item = new Item(value, exp);
        items.put(key, item);
    }

    public Object get(String key) {
        Item item = items.get(key);
        if (item == null || System.nanoTime() > item.getExpiration()) {
            items.remove(key);
            return null;
        }
        return item.getValue();
    }

    private void cleanupExpired() {
        long now = System.nanoTime();
        items.forEach((key, value) -> {
            Item item = value;
            if (now > item.expiration) {
                items.remove(key);
            }
        });
    }

    public static void main(String[] args) {
        long startTime = System.currentTimeMillis();
        // the code to be timed goes here
        Cache cache = new Cache(2000000000L, 20000000000L); // 2-second default TTL, 20-second cleanup interval (nanoseconds)
        for (Integer i = 1; i < 9999999; i++) {
            cache.set(i.toString(), i.toString(), 2000000000L);
            if (i % 100000 == 0) {
                Runtime runtime = Runtime.getRuntime();
                long memoryUsed = runtime.totalMemory() - runtime.freeMemory();
                System.out.println("Memory used: " + memoryUsed / 1024 / 1024 + "MB");
            }
        }
        System.out.println("end");
        long endTime = System.currentTimeMillis();
        long elapsedTime = endTime - startTime;
        System.out.println("Program run time: " + elapsedTime + " ms");
    }
}

class Item {
    private Object value;
    public long expiration;

    public Item(Object value, long expiration) {
        this.value = value;
        this.expiration = expiration;
    }

    public Object getValue() {
        return value;
    }

    public long getExpiration() {
        return expiration;
    }
}
Memory used: 1632MB
Memory used: 1648MB
Memory used: 1664MB
Memory used: 1680MB
Memory used: 1680MB
end
Program run time: 3020 ms
What I find even harder to understand is that the Go version of the cache shows noticeable stalls during the test, sometimes tens or even hundreds of milliseconds. Does anyone know what is wrong with the Go version of the cache?
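For anyone who wants to dig in: one way to check whether those stalls are GC pauses or something else (purely a diagnostic sketch, not a diagnosis; the test name is made up and it lives in the same package as the cache) is to read the GC pause statistics after a Set-heavy loop, or to run the test with GODEBUG=gctrace=1:

package cache

import (
	"fmt"
	"runtime/debug"
	"testing"
	"time"
)

// Hypothetical helper test: after a Set-heavy loop, print GC pause statistics
// to see how much of the observed stalling garbage collection accounts for.
func TestGCPauses(t *testing.T) {
	cache := NewCache(2*time.Second, 2*time.Second)
	for i := 0; i < 1000000; i++ {
		cache.Set(fmt.Sprintf("%d", i), i, 2*time.Second)
	}
	var stats debug.GCStats
	debug.ReadGCStats(&stats)
	var maxPause time.Duration
	for _, p := range stats.Pause {
		if p > maxPause {
			maxPause = p
		}
	}
	fmt.Println("GC cycles:", stats.NumGC, "total pause:", stats.PauseTotal, "max pause:", maxPause)
}

If the GC pauses turn out to be short, a next suspect would be the cleanup goroutine, which holds the write lock while it scans the entire map of millions of entries.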