@@ -2,7 +2,7 @@
 #include "tensor.h"
 #include "utils/arr_rm_alloc.h"
 
-TF_DataType RDL_GetTFDataTypeFromDL(DLDataType dtype) {
+TF_DataType RAI_GetTFDataTypeFromDL(DLDataType dtype) {
 
   if (dtype.code == kDLFloat) {
     switch (dtype.bits) {
@@ -41,7 +41,7 @@ TF_DataType RDL_GetTFDataTypeFromDL(DLDataType dtype) {
   return 0;
 }
 
-DLDataType RDL_GetDLDataTypeFromTF(TF_DataType dtype) {
+DLDataType RAI_GetDLDataTypeFromTF(TF_DataType dtype) {
   switch (dtype) {
     case TF_FLOAT:
       return (DLDataType){ .code = kDLFloat, .bits = 32, .lanes = 1 };
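
Note: the two converters above are intended as mutual inverses on the supported dtypes; both signal an unsupported type with a zeroed result. A minimal round-trip sketch (assuming tensor.h declares the renamed helpers and dlpack.h provides DLDataType/kDLFloat; the function name is illustrative):

  // Round-trip: DLPack float32 -> TF_FLOAT -> DLPack float32.
  static int dtype_roundtrip_ok(void) {
    DLDataType dl = (DLDataType){ .code = kDLFloat, .bits = 32, .lanes = 1 };
    TF_DataType tf = RAI_GetTFDataTypeFromDL(dl);   // expect TF_FLOAT
    if (tf == 0) return 0;                          // unsupported dtype
    DLDataType back = RAI_GetDLDataTypeFromTF(tf);
    return back.code == dl.code && back.bits == dl.bits && back.lanes == dl.lanes;
  }
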
@@ -65,8 +65,8 @@ DLDataType RDL_GetDLDataTypeFromTF(TF_DataType dtype) {
   return (DLDataType){ .bits = 0 };
 }
 
-RDL_Tensor* RDL_TensorCreateFromTFTensor(TF_Tensor *tensor) {
-  RDL_Tensor* ret = RedisModule_Alloc(sizeof(*ret));
+RAI_Tensor* RAI_TensorCreateFromTFTensor(TF_Tensor *tensor) {
+  RAI_Tensor* ret = RedisModule_Alloc(sizeof(*ret));
 
   DLContext ctx = (DLContext){
       .device_type = kDLCPU,
@@ -85,21 +85,21 @@ RDL_Tensor* RDL_TensorCreateFromTFTensor(TF_Tensor *tensor) {
   // Redis be responsible for the memory, or we reuse the TF
   // allocated memory, which might not be optimal down the road
   // Note: on YOLO this has no impact on perf
-#ifdef RDL_COPY_RUN_OUTPUT
+#ifdef RAI_COPY_RUN_OUTPUT
   size_t len = TF_TensorByteSize(tensor);
   char* data = RedisModule_Alloc(len * sizeof(*data));
   memcpy(data, TF_TensorData(tensor), len);
 #endif
 
   ret->tensor = (DLTensor){
       .ctx = ctx,
-#ifdef RDL_COPY_RUN_OUTPUT
+#ifdef RAI_COPY_RUN_OUTPUT
       .data = data,
 #else
       .data = TF_TensorData(tensor),
 #endif
       .ndim = ndims,
-      .dtype = RDL_GetDLDataTypeFromTF(TF_TensorType(tensor)),
+      .dtype = RAI_GetDLDataTypeFromTF(TF_TensorType(tensor)),
       .shape = shape,
       .strides = NULL,
       .byte_offset = 0
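
Note: the RAI_COPY_RUN_OUTPUT branch above buys a clean ownership story for one memcpy: once the bytes sit in RedisModule_Alloc'd memory, the source TF_Tensor can be released immediately and Redis frees the copy on its own schedule. A hedged sketch of that lifetime (the helper name and the immediate TF_DeleteTensor call are illustrative, not part of this commit):

  // Sketch: copying out decouples the DLTensor's lifetime from TensorFlow's.
  // Without the copy, the TF_Tensor must outlive every reader of t->tensor.data.
  RAI_Tensor* consume_tf_output(TF_Tensor* tensor) {
    RAI_Tensor* t = RAI_TensorCreateFromTFTensor(tensor);
  #ifdef RAI_COPY_RUN_OUTPUT
    TF_DeleteTensor(tensor);   // safe: t owns a private copy of the bytes
  #endif
    return t;                  // Redis-side code frees t when done
  }
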
@@ -109,34 +109,34 @@ RDL_Tensor* RDL_TensorCreateFromTFTensor(TF_Tensor *tensor) {
   return ret;
 }
 
-void RDL_TFDeallocator(void* data, size_t len, void* arg) {
+void RAI_TFDeallocator(void* data, size_t len, void* arg) {
   // printf("DEALLOCATOR CALLED\n");
   // do nothing, memory is managed by Redis
 }
 
-TF_Tensor* RDL_TFTensorFromTensor(RDL_Tensor* t){
-#ifdef RDL_COPY_RUN_INPUT
+TF_Tensor* RAI_TFTensorFromTensor(RAI_Tensor* t){
+#ifdef RAI_COPY_RUN_INPUT
   TF_Tensor* out = TF_AllocateTensor(
-      RDL_GetTFDataTypeFromDL(t->tensor.dtype),
+      RAI_GetTFDataTypeFromDL(t->tensor.dtype),
       t->tensor.shape,
       t->tensor.ndim,
-      RDL_TensorByteSize(t));
+      RAI_TensorByteSize(t));
   memcpy(TF_TensorData(out), t->tensor.data, TF_TensorByteSize(out));
   return out;
 #else
   return TF_NewTensor(
-      RDL_GetTFDataTypeFromDL(t->tensor.dtype),
+      RAI_GetTFDataTypeFromDL(t->tensor.dtype),
       t->tensor.shape,
       t->tensor.ndim,
       t->tensor.data,
-      RDL_TensorByteSize(t),
-      &RDL_TFDeallocator,
+      RAI_TensorByteSize(t),
+      &RAI_TFDeallocator,
       NULL);
-#endif /* RDL_COPY_RUN_INPUT */
+#endif /* RAI_COPY_RUN_INPUT */
 }
 
 
-RDL_Graph* RDL_GraphCreateTF(const char* prefix, RDL_Backend backend,
+RAI_Graph* RAI_GraphCreateTF(const char* prefix, RAI_Backend backend,
                              const char* graphdef, size_t graphlen) {
   TF_Graph* graph = TF_NewGraph();
 
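
Note: the TF_NewTensor branch of RAI_TFTensorFromTensor above hands TensorFlow a view of Redis-owned memory, which is why RAI_TFDeallocator is deliberately a no-op. A self-contained sketch of the same zero-copy pattern against the TF C API (buffer, shape, and function name here are made up for illustration):

  #include <tensorflow/c/c_api.h>

  static void noop_dealloc(void* data, size_t len, void* arg) {
    // Intentionally empty: the caller, not TensorFlow, owns `data`.
    (void)data; (void)len; (void)arg;
  }

  // Wrap an externally owned float buffer as a TF_Tensor without copying.
  // The buffer must stay alive until TF_DeleteTensor has been called on the result.
  TF_Tensor* wrap_buffer(float* buf, int64_t rows, int64_t cols) {
    int64_t dims[2] = { rows, cols };
    return TF_NewTensor(TF_FLOAT, dims, 2,
                        buf, rows * cols * sizeof(float),
                        noop_dealloc, NULL);
  }
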
@@ -173,7 +173,7 @@ RDL_Graph *RDL_GraphCreateTF(const char *prefix, RDL_Backend backend,
   TF_DeleteSessionOptions(sessionOptions);
   TF_DeleteStatus(sessionStatus);
 
-  RDL_Graph* ret = RedisModule_Alloc(sizeof(*ret));
+  RAI_Graph* ret = RedisModule_Alloc(sizeof(*ret));
   ret->graph = graph;
   ret->session = session;
   ret->backend = backend;
@@ -182,7 +182,7 @@ RDL_Graph *RDL_GraphCreateTF(const char *prefix, RDL_Backend backend,
   return ret;
 }
 
-void RDL_GraphFreeTF(RDL_Graph* graph) {
+void RAI_GraphFreeTF(RAI_Graph* graph) {
   TF_Status* status = TF_NewStatus();
   TF_CloseSession(graph->session, status);
 
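
Note: like most of the TF C API used in this file, TF_CloseSession reports failure through a TF_Status out-parameter rather than a return value. A small checking idiom for such calls (the log-and-return policy is an assumption, not what the elided lines do):

  #include <stdio.h>
  #include <tensorflow/c/c_api.h>

  // Returns 0 on success, -1 after printing the TF error message.
  static int check(TF_Status* status, const char* what) {
    if (TF_GetCode(status) != TF_OK) {
      fprintf(stderr, "%s failed: %s\n", what, TF_Message(status));
      return -1;
    }
    return 0;
  }
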
@@ -207,7 +207,7 @@ void RDL_GraphFreeTF(RDL_Graph* graph) {
   TF_DeleteStatus(status);
 }
 
-int RDL_GraphRunTF(RDL_GraphRunCtx* gctx) {
+int RAI_GraphRunTF(RAI_GraphRunCtx* gctx) {
   TF_Status* status = TF_NewStatus();
 
   TF_Tensor* inputTensorsValues[array_len(gctx->inputs)];
@@ -216,7 +216,7 @@ int RDL_GraphRunTF(RDL_GraphRunCtx* gctx) {
   TF_Output outputs[array_len(gctx->outputs)];
 
   for (size_t i = 0; i < array_len(gctx->inputs); ++i) {
-    inputTensorsValues[i] = RDL_TFTensorFromTensor(gctx->inputs[i].tensor);
+    inputTensorsValues[i] = RAI_TFTensorFromTensor(gctx->inputs[i].tensor);
     TF_Output port;
     port.oper = TF_GraphOperationByName(gctx->graph->graph, gctx->inputs[i].name);
     port.index = 0;
@@ -249,8 +249,8 @@ int RDL_GraphRunTF(RDL_GraphRunCtx* gctx) {
   }
 
   for (size_t i = 0; i < array_len(gctx->outputs); ++i) {
-    RDL_Tensor* output_tensor = RDL_TensorCreateFromTFTensor(outputTensorsValues[i]);
-    gctx->outputs[i].tensor = RDL_TensorGetShallowCopy(output_tensor);
+    RAI_Tensor* output_tensor = RAI_TensorCreateFromTFTensor(outputTensorsValues[i]);
+    gctx->outputs[i].tensor = RAI_TensorGetShallowCopy(output_tensor);
   }
 
   TF_DeleteStatus(status);
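
Note: the elided middle of RAI_GraphRunTF (old lines 223-248) presumably performs the actual session invocation. For orientation only, the standard TF_SessionRun call has this shape when fed the arrays built above (a sketch of the public API, not the commit's exact code; an `inputs` TF_Output array filled in the first loop and an `outputTensorsValues` array are assumed):

  TF_SessionRun(gctx->graph->session, NULL /* run options */,
                inputs, inputTensorsValues, array_len(gctx->inputs),
                outputs, outputTensorsValues, array_len(gctx->outputs),
                NULL /* target opers */, 0, NULL /* run metadata */,
                status);
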