How Redis Stores Keys and Values: A Detailed Look


Redis is an in-memory database that stores key-value pairs, and the way it stores them is similar to Java's HashMap. The struct that represents a Redis database is redisDb (defined in server.h); a Redis server has 16 databases by default, numbered 0 through 15.

typedef struct redisDb {
    dict *dict;                 /* The keyspace for this DB */
    dict *expires;              /* Timeout of keys with a timeout set */
    dict *blocking_keys;        /* Keys with clients waiting for data (BLPOP) */
    dict *ready_keys;           /* Blocked keys that received a PUSH */
    dict *watched_keys;         /* WATCHED keys for MULTI/EXEC CAS */
    struct evictionPoolEntry *eviction_pool;    /* Eviction pool of keys */
    int id;                     /* Database ID */
    long long avg_ttl;          /* Average TTL, just for stats */
} redisDb;
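The server itself holds an array of these databases. A rough sketch of the two relevant redisServer fields (field names assumed from server.h of roughly the same era; this is an illustration, not the full struct):

/* Sketch: server.h defines many more fields; only the DB array is shown.
 * Field names are assumptions based on the same era of the source tree. */
struct redisServer {
    /* ... many other fields ... */
    redisDb *db;    /* Array of databases, indexed by redisDb.id */
    int dbnum;      /* Number of databases, 16 by default ("databases" config) */
    /* ... */
};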

dict stores the key -> value mappings, while expires stores key -> expiration time.
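To make that division of labor concrete, here is a minimal sketch of how a read conceptually consults both dictionaries. sketchLookup is a hypothetical helper, not the actual db.c code path (the real lookupKey/expireIfNeeded logic also deletes expired keys, propagates the deletion to replicas, etc.):

/* Sketch (hypothetical helper): look a key up in db->dict, but treat it as
 * missing if db->expires says its deadline has already passed. */
dictEntry *sketchLookup(redisDb *db, void *key, long long now_ms) {
    dictEntry *expire = dictFind(db->expires, key);
    if (expire != NULL && dictGetSignedIntegerVal(expire) < now_ms)
        return NULL;                    /* logically expired */
    return dictFind(db->dict, key);     /* the actual value entry */
}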

dict is a struct defined in dict.h:

typedef struct dict {
    dictType *type;
    void *privdata;
    dictht ht[2];
    long rehashidx; /* rehashing not in progress if rehashidx == -1 */
    unsigned long iterators; /* number of iterators currently running */
} dict;

typedef struct dictht {
    dictEntry **table;
    unsigned long size;     // size of the table array
    unsigned long sizemask;
    unsigned long used;     // number of key-value pairs stored in the table
} dictht;

typedef struct dictEntry {
    void *key;
    union {
        void *val;
        uint64_t u64;
        int64_t s64;
        double d;
    } v;
    struct dictEntry *next;
} dictEntry;

dict can be compared to Java's HashMap, with dictEntry corresponding to java.util.HashMap.Entry<K, V>. The slight differences are that dict wraps the entry table in a thin struct of its own (dictht), and that dict holds two tables, which are used for rehashing.
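Bucket addressing works just like HashMap's: the key's hash is masked with sizemask (size is always a power of two, so sizemask = size - 1) to pick a bucket, and collisions are chained through next. A minimal sketch of that traversal, assuming a dict d that is not currently rehashing (the real code also probes ht[1]; see dictFind later):

/* Sketch: locate key's chain in ht[0] and walk it. */
unsigned int h = dictHashKey(d, key);          /* hash via d->type->hashFunction */
unsigned long idx = h & d->ht[0].sizemask;     /* sizemask == size - 1 */
for (dictEntry *he = d->ht[0].table[idx]; he != NULL; he = he->next) {
    if (key == he->key || dictCompareKeys(d, key, he->key)) {
        /* found: the value lives in he->v.val (or u64 / s64 / d) */
        break;
    }
}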

Let's analyze dict's dictReplace (in dict.c), which is analogous to HashMap's put:

/* Add or Overwrite:
 * Add an element, discarding the old value if the key already exists.
 * Return 1 if the key was added from scratch, 0 if there was already an
 * element with such key and dictReplace() just performed a value update
 * operation. */
int dictReplace(dict *d, void *key, void *val)
{
    dictEntry *entry, *existing, auxentry;

    /* Try to add the element. If the key
     * does not exist dictAdd will succeed. */
    entry = dictAddRaw(d,key,&existing);
    if (entry) {
        dictSetVal(d, entry, val);
        return 1;
    }

    /* Set the new value and free the old one. Note that it is important
     * to do that in this order, as the value may just be exactly the same
     * as the previous one. In this context, think to reference counting,
     * you want to increment (set), and then decrement (free), and not the
     * reverse. */
    auxentry = *existing;
    dictSetVal(d, existing, val);
    dictFreeVal(d, &auxentry);
    return 0;
}
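As the comment above hints, the plain insert path dictAdd is just a thin wrapper over dictAddRaw that fails when the key already exists. A sketch of it, reconstructed from the same era of dict.c (verify against your version):

/* Sketch of dictAdd: add a key/value pair, failing if the key exists.
 * dictReplace above reuses dictAddRaw directly instead of calling this. */
int dictAdd(dict *d, void *key, void *val)
{
    dictEntry *entry = dictAddRaw(d, key, NULL);

    if (!entry) return DICT_ERR;    /* key already present */
    dictSetVal(d, entry, val);
    return DICT_OK;
}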

dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing)
{
    int index;
    dictEntry *entry;
    dictht *ht;

    if (dictIsRehashing(d)) _dictRehashStep(d);

    /* Get the index of the new element, or -1 if
     * the element already exists. */
    if ((index = _dictKeyIndex(d, key, dictHashKey(d,key), existing)) == -1)
        return NULL;

    /* Allocate the memory and store the new entry.
     * Insert the element in top, with the assumption that in a database
     * system it is more likely that recently added entries are accessed
     * more frequently. */
    ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0];
    entry = zmalloc(sizeof(*entry));
    entry->next = ht->table[index];
    ht->table[index] = entry;
    ht->used++;

    /* Set the hash entry fields. */
    dictSetKey(d, entry, key);
    return entry;
}

The main logic is in dictAddRaw: like HashMap, it first computes the bucket index in the table, then inserts the new entry at the head of that bucket's linked list.

If the dict is in the middle of a rehash (i.e., rehashidx != -1), the insert first calls _dictRehashStep, and a rehashing dict inserts new entries into ht[1].

static void _dictRehashStep(dict *d) {
    if (d->iterators == 0) dictRehash(d,1);
}

int dictRehash(dict *d, int n) {
    int empty_visits = n*10; /* Max number of empty buckets to visit. */
    if (!dictIsRehashing(d)) return 0;

    while(n-- && d->ht[0].used != 0) {
        dictEntry *de, *nextde;

        /* Note that rehashidx can't overflow as we are sure there are more
         * elements because ht[0].used != 0 */
        assert(d->ht[0].size > (unsigned long)d->rehashidx);
        while(d->ht[0].table[d->rehashidx] == NULL) {
            d->rehashidx++;
            if (--empty_visits == 0) return 1;
        }
        de = d->ht[0].table[d->rehashidx];
        /* Move all the keys in this bucket from the old to the new hash HT */
        while(de) {
            unsigned int h;

            nextde = de->next;
            /* Get the index in the new hash table */
            h = dictHashKey(d, de->key) & d->ht[1].sizemask;
            de->next = d->ht[1].table[h];
            d->ht[1].table[h] = de;
            d->ht[0].used--;
            d->ht[1].used++;
            de = nextde;
        }
        d->ht[0].table[d->rehashidx] = NULL;
        d->rehashidx++;
    }

    /* Check if we already rehashed the whole table... */
    if (d->ht[0].used == 0) {
        zfree(d->ht[0].table);
        d->ht[0] = d->ht[1];
        _dictReset(&d->ht[1]);
        d->rehashidx = -1;
        return 0;
    }

    /* More to rehash... */
    return 1;
}

As the code shows, rehashidx marks the index of the bucket in ht[0] that is currently being rehashed.

So, under what circumstances does Redis rehash a dict?

The call stack is _dictKeyIndex -> _dictExpandIfNeeded -> dictExpand. While computing a key's index, Redis checks whether the dict needs to be expanded; if it does, dictExpand sets the dict's rehashidx to 0.

static int _dictKeyIndex(dict *d, const void *key, unsigned int hash, dictEntry **existing)
{
    unsigned int idx, table;
    dictEntry *he;
    if (existing) *existing = NULL;

    /* Expand the hash table if needed */
    if (_dictExpandIfNeeded(d) == DICT_ERR)
        return -1;
    for (table = 0; table <= 1; table++) {
        idx = hash & d->ht[table].sizemask;
        /* Search if this slot does not already contain the given key */
        he = d->ht[table].table[idx];
        while(he) {
            if (key==he->key || dictCompareKeys(d, key, he->key)) {
                if (existing) *existing = he;
                return -1;
            }
            he = he->next;
        }
        if (!dictIsRehashing(d)) break;
    }
    return idx;
}

/* Expand the hash table if needed */
static int _dictExpandIfNeeded(dict *d)
{
    /* Incremental rehashing already in progress. Return. */
    if (dictIsRehashing(d)) return DICT_OK;

    /* If the hash table is empty expand it to the initial size. */
    if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);

    /* If we reached the 1:1 ratio, and we are allowed to resize the hash
     * table (global setting) or we should avoid it but the ratio between
     * elements/buckets is over the "safe" threshold, we resize doubling
     * the number of buckets. */
    if (d->ht[0].used >= d->ht[0].size &&
        (dict_can_resize ||
         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
    {
        return dictExpand(d, d->ht[0].used*2);
    }
    return DICT_OK;
}
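dict_can_resize and dict_force_resize_ratio are globals defined near the top of dict.c. Their defaults in this era of the source are, to the best of my knowledge, as below (dict_can_resize is temporarily cleared while an RDB/AOF child process exists, to limit copy-on-write cost); treat the exact values as something to verify against your version:

/* Assumed defaults from dict.c of the same era: resizing is normally
 * allowed; while it is disabled, only a load factor above 5 forces it. */
static int dict_can_resize = 1;
static unsigned int dict_force_resize_ratio = 5;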

/* Expand or create the hash table */
int dictExpand(dict *d, unsigned long size)
{
    dictht n; /* the new hash table */
    unsigned long realsize = _dictNextPower(size);

    /* the size is invalid if it is smaller than the number of
     * elements already inside the hash table */
    if (dictIsRehashing(d) || d->ht[0].used > size)
        return DICT_ERR;

    /* Rehashing to the same table size is not useful. */
    if (realsize == d->ht[0].size) return DICT_ERR;

    /* Allocate the new hash table and initialize all pointers to NULL */
    n.size = realsize;
    n.sizemask = realsize-1;
    n.table = zcalloc(realsize*sizeof(dictEntry*));
    n.used = 0;

    /* Is this the first initialization? If so it's not really a rehashing
     * we just set the first hash table so that it can accept keys. */
    if (d->ht[0].table == NULL) {
        d->ht[0] = n;
        return DICT_OK;
    }

    /* Prepare a second hash table for incremental rehashing */
    d->ht[1] = n;
    d->rehashidx = 0;
    return DICT_OK;
}
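_dictNextPower rounds the requested size up to the next power of two, which is what lets sizemask = size - 1 work as a bit mask. A sketch of what it does, reconstructed from the same era of dict.c (the exact overflow handling varies by version; verify against yours):

/* Sketch: round size up to the next power of two, starting from
 * DICT_HT_INITIAL_SIZE (4), so that hash & (size-1) selects a bucket. */
static unsigned long _dictNextPower(unsigned long size)
{
    unsigned long i = DICT_HT_INITIAL_SIZE;

    if (size >= LONG_MAX) return LONG_MAX;  /* overflow guard (version-dependent) */
    while(1) {
        if (i >= size)
            return i;
        i *= 2;
    }
}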

From a data-structure point of view, Redis's dict is very similar to Java's HashMap; the difference lies in rehashing. When HashMap resizes, it copies everything in one pass and then switches to the new array, whereas dict maintains two dictht tables: normally only ht[0] is used, but once a rehash begins both ht[0] and ht[1] are in play, and the rehash work is amortized across individual operations such as dictAdd and dictFind.

dictEntry *dictFind(dict *d, const void *key)
{
    dictEntry *he;
    unsigned int h, idx, table;

    if (d->ht[0].used + d->ht[1].used == 0) return NULL; /* dict is empty */
    if (dictIsRehashing(d)) _dictRehashStep(d);
    h = dictHashKey(d, key);
    for (table = 0; table <= 1; table++) { // probes both d->ht[0] and d->ht[1]
        idx = h & d->ht[table].sizemask;
        he = d->ht[table].table[idx];
        while(he) {
            if (key==he->key || dictCompareKeys(d, key, he->key))
                return he; // return as soon as the key is found
            he = he->next;
        }
        if (!dictIsRehashing(d)) return NULL;
    }
    return NULL;
}

Why did Redis choose this design?

Imagine that, like Java's HashMap, Redis copied everything in one pass: when the dict is very large, the copy would take a long time, and for that entire period the single-threaded Redis server would be unable to serve requests.

This design does add complexity: once a rehash has started, the dict's data is spread across ht[0] and ht[1], so lookups (dictFind), deletions (dictDelete), and updates (dictReplace) all have to probe both ht[0] and ht[1].
