@@ -40,7 +40,10 @@ def torch_script_run(env, script_key):
 
     con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
 
-    con.execute_command('AI.SCRIPTRUN', script_key, 'bar', 'INPUTS', 'a{1}', 'b{1}', 'OUTPUTS', 'c{1}')
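+    # AI.SCRIPTEXECUTE takes explicit argument counts: 'KEYS', 1, '{1}' names the hash tag that keeps the
+    # command in the same cluster slot as the tensor keys, followed by 2 input tensors and 1 output tensor.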
+    con.execute_command('AI.SCRIPTEXECUTE', script_key, 'bar', 'KEYS', 1, '{1}', 'INPUTS', 2, 'a{1}', 'b{1}',
+                        'OUTPUTS', 1, 'c{1}')
 
     ensureSlaveSynced(con, env)
 
@@ -216,3 +217,163 @@ def test_v2_tensor(self):
         self.env.assertEqual([tensor_type, tensor_shape], [b"INT32", [2, 1]])
         values = con.execute_command('AI.TENSORGET', key_name, 'VALUES')
         self.env.assertEqual(values, [1, 2])
+
+
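+# The environment runs with AOF persistence enabled (useAof=True): each test stores an entity, restarts the
+# server so the AOF rewrite callback persists it, and checks that metadata and values survive the reload.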
+class TestAofRewrite:
+
+    def __init__(self):
+        self.env = Env(useAof=True)
+
+    def test_aof_rewrite_tf_model(self):
+        key_name = "tf_graph{1}"
+        con = self.env.getConnection()
+        tf_model = load_file_content("graph.pb")
+        con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH', 'batchsize', 4, 'minbatchsize', 2,
+                            'minbatchtimeout', 1000, 'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
+
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
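+        # AI.MODELGET with META replies with alternating field names and values; the names are ignored here.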
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH", 4, 2, 1000, [b"a", b"b"], [b"mul"]])
+        tf_model_run(self.env, key_name)
+
+        # Reinsert the model (without minbatchtimeout)
+        con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH1', 'batchsize', 4, 'minbatchsize', 2,
+                            'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH1", 4, 2, 0, [b"a", b"b"], [b"mul"]])
+
+        # Reinsert the model (without minbatch)
+        con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH2', 'batchsize', 4,
+                            'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH2", 4, 0, 0, [b"a", b"b"], [b"mul"]])
+
+        # Reinsert the model (without batching)
+        con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH3',
+                            'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH3", 0, 0, 0, [b"a", b"b"], [b"mul"]])
+
+    def test_aof_rewrite_torch_model(self):
+        key_name = "pt-minimal{1}"
+        con = self.env.getConnection()
+        torch_model = load_file_content("pt-minimal.pt")
+        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL', 'batchsize', 4, 'minbatchsize', 2,
+                            'minbatchtimeout', 1000, 'BLOB', torch_model)
+
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL", 4, 2, 1000, [b"a", b"b"], [b'']])
+        torch_model_run(self.env, key_name)
+
+        # Reinsert the model (without minbatchtimeout)
+        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL1', 'batchsize', 4, 'minbatchsize', 2,
+                            'BLOB', torch_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL1", 4, 2, 0, [b"a", b"b"], [b'']])
+
+        # Reinsert the model (without minbatch)
+        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL2', 'batchsize', 4,
+                            'BLOB', torch_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL2", 4, 0, 0, [b"a", b"b"], [b'']])
+
+        # Reinsert the model (without batching)
+        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL3',
+                            'BLOB', torch_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL3", 0, 0, 0, [b"a", b"b"], [b'']])
+
+    def test_aof_rewrite_torch_script(self):
+        key_name = "torch_script{1}"
+        con = self.env.getConnection()
+        torch_script = load_file_content("script.txt")
+        con.execute_command('AI.SCRIPTSET', key_name, 'CPU', 'TAG', 'TORCH_SCRIPT', 'SOURCE', torch_script)
+
+        # Redis should save the stored script by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, device, _, tag = con.execute_command("AI.SCRIPTGET", key_name, "META")
+        self.env.assertEqual([device, tag], [b"CPU", b"TORCH_SCRIPT"])
+        torch_script_run(self.env, key_name)
+
+    def test_aof_rewrite_onnx_model(self):
+        key_name = "linear_iris{1}"
+        con = self.env.getConnection()
+        onnx_model = load_file_content("linear_iris.onnx")
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS', 'batchsize', 4, 'minbatchsize', 2,
+                            'minbatchtimeout', 1000, 'BLOB', onnx_model)
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS", 4, 2, 1000, [b'float_input'], [b'variable']])
+        onnx_model_run(self.env, key_name)
+
+        # Reinsert the model (without minbatchtimeout)
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS1', 'batchsize', 4,
+                            'minbatchsize', 2, 'BLOB', onnx_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS1", 4, 2, 0, [b'float_input'], [b'variable']])
+
+        # Reinsert the model (without minbatch)
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS2', 'batchsize', 4,
+                            'BLOB', onnx_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS2", 4, 0, 0, [b'float_input'], [b'variable']])
+
+        # Reinsert the model (without batching)
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS3',
+                            'BLOB', onnx_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS3", 0, 0, 0, [b'float_input'], [b'variable']])
+
+    def test_aof_rewrite_tensor(self):
+        key_name = "tensor{1}"
+        con = self.env.getConnection()
+        con.execute_command('AI.TENSORSET', key_name, 'INT32', 2, 1, 'VALUES', 1, 2)
+        # Redis should save the stored tensor by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, tensor_type, _, tensor_shape = con.execute_command('AI.TENSORGET', key_name, 'META')
+        self.env.assertEqual([tensor_type, tensor_shape], [b"INT32", [2, 1]])
+        values = con.execute_command('AI.TENSORGET', key_name, 'VALUES')
+        self.env.assertEqual(values, [1, 2])