@@ -6,46 +6,46 @@ import {
  IncrementTuple,
  KeyValueDBTuple,
} from '@naturalcycles/db-lib'
-import { _chunk, StringMap } from '@naturalcycles/js-lib'
import { ReadableTyped } from '@naturalcycles/nodejs-lib'
-import { RedisClient } from './redisClient'
import { RedisKeyValueDBCfg } from './redisKeyValueDB'

-export interface RedisHashKeyValueDBCfg extends RedisKeyValueDBCfg {
-  hashKey: string
-}
-
-export class RedisHashKeyValueDB implements CommonKeyValueDB, AsyncDisposable {
-  client: RedisClient
-  keyOfHashField: string
-
-  constructor(cfg: RedisHashKeyValueDBCfg) {
-    this.client = cfg.client
-    this.keyOfHashField = cfg.hashKey
-  }
+/**
+ * RedisHashKeyValueDB is a KeyValueDB implementation that uses hash fields to simulate tables.
+ * The value in the `table` argument points to a hash field in Redis.
+ *
+ * The reason for having this approach and also the traditional RedisKeyValueDB is that
+ * the currently available Redis versions (in Memorystore, or on macOS) do not support
+ * expiring hash properties.
+ * The expiring fields feature is important, and only available via RedisKeyValueDB.
+ *
+ * Once the available Redis version reaches 7.4.0+,
+ * this implementation can take over for RedisKeyValueDB.
+ */
+export class RedisHashKeyValueDB implements CommonKeyValueDB, AsyncDisposable {
+  constructor(public cfg: RedisKeyValueDBCfg) {}

  support = {
    ...commonKeyValueDBFullSupport,
  }

  async ping(): Promise<void> {
-    await this.client.ping()
+    await this.cfg.client.ping()
  }

  async [Symbol.asyncDispose](): Promise<void> {
-    await this.client.disconnect()
+    await this.cfg.client.disconnect()
  }

  async getByIds(table: string, ids: string[]): Promise<KeyValueDBTuple[]> {
    if (!ids.length) return []
    // we assume that the order of returned values is the same as order of input ids
-    const bufs = await this.client.hmgetBuffer(this.keyOfHashField, this.idsToKeys(table, ids))
+    const bufs = await this.cfg.client.hmgetBuffer(table, ids)
    return bufs.map((buf, i) => [ids[i], buf] as KeyValueDBTuple).filter(([_k, v]) => v !== null)
  }

  async deleteByIds(table: string, ids: string[]): Promise<void> {
    if (!ids.length) return
-    await this.client.hdel(this.keyOfHashField, this.idsToKeys(table, ids))
+    await this.cfg.client.hdel(table, ids)
  }

  async saveBatch(
@@ -55,106 +55,70 @@ export class RedisHashKeyValueDB implements CommonKeyValueDB, AsyncDisposable {
  ): Promise<void> {
    if (!entries.length) return

-    const entriesWithKey = entries.map(([k, v]) => [this.idToKey(table, k), v])
-    const map: StringMap<any> = Object.fromEntries(entriesWithKey)
+    const record = Object.fromEntries(entries)

    if (opt?.expireAt) {
-      await this.client.hsetWithTTL(this.keyOfHashField, map, opt.expireAt)
+      await this.cfg.client.hsetWithTTL(table, record, opt.expireAt)
    } else {
-      await this.client.hset(this.keyOfHashField, map)
+      await this.cfg.client.hset(table, record)
    }
  }

  streamIds(table: string, limit?: number): ReadableTyped<string> {
-    let stream = this.client
-      .hscanStream(this.keyOfHashField, {
-        match: `${table}:*`,
-      })
+    const stream = this.cfg.client
+      .hscanStream(table)
      .flatMap(keyValueList => {
        const keys: string[] = []
-        keyValueList.forEach((keyOrValue, index) => {
-          if (index % 2 !== 0) return
-          keys.push(keyOrValue)
-        })
-        return this.keysToIds(table, keys)
+        for (let i = 0; i < keyValueList.length; i += 2) {
+          keys.push(keyValueList[i]!)
+        }
+        return keys
      })
-
-    if (limit) {
-      stream = stream.take(limit)
-    }
+      .take(limit || Infinity)

    return stream
  }

  streamValues(table: string, limit?: number): ReadableTyped<Buffer> {
-    return this.client
-      .hscanStream(this.keyOfHashField, {
-        match: `${table}:*`,
-      })
+    return this.cfg.client
+      .hscanStream(table)
      .flatMap(keyValueList => {
-        const values: string[] = []
-        keyValueList.forEach((keyOrValue, index) => {
-          if (index % 2 !== 1) return
-          values.push(keyOrValue)
-        })
-        return values.map(v => Buffer.from(v))
+        const values: Buffer[] = []
+        for (let i = 0; i < keyValueList.length; i += 2) {
+          const value = Buffer.from(keyValueList[i + 1]!)
+          values.push(value)
+        }
+        return values
      })
      .take(limit || Infinity)
  }

  streamEntries(table: string, limit?: number): ReadableTyped<KeyValueDBTuple> {
-    return this.client
-      .hscanStream(this.keyOfHashField, {
-        match: `${table}:*`,
-      })
+    return this.cfg.client
+      .hscanStream(table)
      .flatMap(keyValueList => {
-        const entries = _chunk(keyValueList, 2)
-        return entries.map(([k, v]) => {
-          return [this.keyToId(table, String(k)), Buffer.from(String(v))] satisfies KeyValueDBTuple
-        })
+        const entries: [string, Buffer][] = []
+        for (let i = 0; i < keyValueList.length; i += 2) {
+          const key = keyValueList[i]!
+          const value = Buffer.from(keyValueList[i + 1]!)
+          entries.push([key, value])
+        }
+        return entries
      })
      .take(limit || Infinity)
  }

  async count(table: string): Promise<number> {
-    return await this.client.hscanCount(this.keyOfHashField, {
-      match: `${table}:*`,
-    })
+    return await this.cfg.client.hscanCount(table)
  }

  async incrementBatch(table: string, increments: IncrementTuple[]): Promise<IncrementTuple[]> {
-    const incrementTuplesWithInternalKeys = increments.map(
-      ([id, v]) => [this.idToKey(table, id), v] as [string, number],
-    )
-    const resultsWithInternalKeys = await this.client.hincrBatch(
-      this.keyOfHashField,
-      incrementTuplesWithInternalKeys,
-    )
-    const results = resultsWithInternalKeys.map(
-      ([k, v]) => [this.keyToId(table, k), v] as IncrementTuple,
-    )
-    return results
+    return await this.cfg.client.hincrBatch(table, increments)
  }

  async createTable(table: string, opt?: CommonDBCreateOptions): Promise<void> {
    if (!opt?.dropIfExists) return

-    await this.client.dropTable(table)
-  }
-
-  private idsToKeys(table: string, ids: string[]): string[] {
-    return ids.map(id => this.idToKey(table, id))
-  }
-
-  private idToKey(table: string, id: string): string {
-    return `${table}:${id}`
-  }
-
-  private keysToIds(table: string, keys: string[]): string[] {
-    return keys.map(key => this.keyToId(table, key))
-  }
-
-  private keyToId(table: string, key: string): string {
-    return key.slice(table.length + 1)
+    await this.cfg.client.del([table])
  }
}
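
For readers skimming the diff: the doc comment above describes a hash-per-table layout, where each `table` argument names one Redis hash and every record id becomes a field inside it. Below is a minimal usage sketch of the new class under that layout. It is not part of the change; the import paths, the no-argument `RedisClient` constructor, and the top-level `await` environment are assumptions for illustration only.

```ts
import { RedisClient } from './redisClient' // assumed path, mirroring the removed import above
import { RedisHashKeyValueDB } from './redisHashKeyValueDB' // assumed file name for the class in this diff

// Assumption: RedisClient connects to a local Redis with default options.
const client = new RedisClient()
const db = new RedisHashKeyValueDB({ client })

await db.ping()

// 'user' becomes one Redis hash; 'id1' and 'id2' become fields inside it.
await db.saveBatch('user', [
  ['id1', Buffer.from(JSON.stringify({ name: 'Jane' }))],
  ['id2', Buffer.from(JSON.stringify({ name: 'John' }))],
])

// Returns [id, Buffer] tuples for the ids that exist, in input order.
const rows = await db.getByIds('user', ['id1', 'id2'])
for (const [id, buf] of rows) {
  console.log(id, JSON.parse(buf.toString()))
}

await db[Symbol.asyncDispose]() // disconnects the underlying client
```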