forked from zerollzeng/tiny-tensorrt
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathTrt.h
More file actions
259 lines (207 loc) · 8.58 KB
/
Trt.h
File metadata and controls
259 lines (207 loc) · 8.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
/*
* @Date: 2019-08-29 09:48:01
* @LastEditors: zerollzeng
* @LastEditTime: 2020-03-02 14:58:37
*/
#ifndef TRT_HPP
#define TRT_HPP
#include <string>
#include <vector>
#include <iostream>
#include <numeric>
#include <algorithm>
#include "NvInfer.h"
// Minimal ILogger implementation handed to the TensorRT builder/runtime.
// Prints every message except verbose-level ones to stdout.
class TrtLogger : public nvinfer1::ILogger {
void log(Severity severity, const char* msg) override
{
// Filter out only kVERBOSE messages. NOTE(review): the original comment said
// "suppress info-level messages", but kINFO messages ARE printed here — only
// verbose-level output is suppressed.
if (severity != Severity::kVERBOSE)
std::cout << msg << std::endl;
}
};
// Configuration for the custom-layer plugin factory.
// Defaults match the stock YOLOv3 setup used by this project.
struct TrtPluginParams {
// yolo-det layer: number of object classes the detector outputs.
int yoloClassNum = 1;
// yolo-det layer: network input resolution; valid values are 416 or 608.
int yolo3NetSize = 416;
// upsample layer: scale factor applied by the upsample plugin.
float upsampleScale = 2.0f;
};
class PluginFactory;
// Thin wrapper around a TensorRT engine: builds (or deserializes) an engine
// from caffe / onnx / uff models and exposes synchronous and asynchronous
// inference plus host<->device data transfer helpers.
class Trt {
public:
/**
 * @description: default constructor; initializes the plugin factory with default parameters.
 */
Trt();
/**
 * @description: use this constructor if you want to customize the plugin parameters.
 */
Trt(TrtPluginParams params);
~Trt();
/**
 * @description: create an engine from a caffe prototxt and caffe model.
 * @prototxt: path to the caffe prototxt
 * @caffeModel: path to the caffe model containing the network weights
 * @engineFile: path to the serialized engine file. If it does not exist, the engine
 *              is built from the prototxt and caffe model (which takes about a
 *              minute); otherwise the engine is deserialized from this file,
 *              which is very fast.
 * @outputBlobName: names of the layers that are network outputs; find them in the
 *                  caffe prototxt
 * @maxBatchSize: maximum batch size used at inference time; make sure it does not
 *                exceed the maximum batch size supported by your model
 * @mode: engine run mode, 0 for float32, 1 for float16, 2 for int8
 * @calibratorData: calibration samples for int8 mode — a batch of sample inputs.
 *                  For a classification task you need around 500 sample inputs.
 *                  Only used when mode is 2 (int8).
 */
void CreateEngine(
const std::string& prototxt,
const std::string& caffeModel,
const std::string& engineFile,
const std::vector<std::string>& outputBlobName,
int maxBatchSize,
int mode,
const std::vector<std::vector<float>>& calibratorData);
/**
 * @description: create an engine from an onnx model.
 * @onnxModel: path to the onnx model
 * @engineFile: path of the engine file to load or save; if empty, the built
 *              engine is not saved to disk
 * @customOutput: custom output tensor names to mark as network outputs
 * @maxBatchSize: maximum batch size for inference
 * @mode: engine run mode, 0 for float32, 1 for float16, 2 for int8
 * @calibratorData: int8 calibration samples (see the caffe overload above)
 */
void CreateEngine(
const std::string& onnxModel,
const std::string& engineFile,
const std::vector<std::string>& customOutput,
int maxBatchSize,
int mode,
const std::vector<std::vector<float>>& calibratorData);
/**
 * @description: create an engine from a uff model.
 * @uffModel: path to the uff model
 * @engineFile: path of the engine file to load or save; if empty, the built
 *              engine is not saved to disk
 * @inputTensorName: input tensor names
 * @inputDims: dimensions of each input tensor, parallel to inputTensorName
 * @outputTensorName: output tensor names
 * @maxBatchSize: maximum batch size for inference
 * @mode: engine run mode, 0 for float32, 1 for float16, 2 for int8
 * @calibratorData: int8 calibration samples (see the caffe overload above)
 */
void CreateEngine(
const std::string& uffModel,
const std::string& engineFile,
const std::vector<std::string>& inputTensorName,
const std::vector<std::vector<int>>& inputDims,
const std::vector<std::string>& outputTensorName,
int maxBatchSize,
int mode,
const std::vector<std::vector<float>>& calibratorData);
/**
 * @description: run inference on the engine context. Make sure you have already
 *               copied your input data to device memory; see DataTransfer and
 *               CopyFromHostToDevice etc.
 */
void Forward();
/**
 * @description: asynchronous inference on the engine context.
 * @stream: cuda stream for async inference and data transfer
 */
void ForwardAsync(const cudaStream_t& stream);
/**
 * @description: data transfer between host and device. For example, before Forward
 *               you need to copy input data from host to device, and after Forward
 *               you need to transfer the output result from device to host.
 * @data: data buffer to read from or write to
 * @bindIndex: binding index; you can see the indices in the CreateEngine log output
 * @isHostToDevice: true (1) for host to device, false (0) for device to host
 *                  (host: cpu memory, device: gpu memory)
 */
void DataTransfer(std::vector<float>& data, int bindIndex, bool isHostToDevice);
/**
 * @description: asynchronous data transfer between host and device; see DataTransfer.
 * @stream: cuda stream for the async transfer
 */
void DataTransferAsync(std::vector<float>& data, int bindIndex, bool isHostToDevice, cudaStream_t& stream);
// Copy one binding's worth of data host->device / device->host, synchronously
// or on the given cuda stream.
void CopyFromHostToDevice(const std::vector<float>& input, int bindIndex);
void CopyFromDeviceToHost(std::vector<float>& output, int bindIndex);
void CopyFromHostToDevice(const std::vector<float>& input, int bindIndex,const cudaStream_t& stream);
void CopyFromDeviceToHost(std::vector<float>& output, int bindIndex,const cudaStream_t& stream);
// Select / query the active cuda device.
void SetDevice(int device);
int GetDevice() const;
/**
 * @description: get the maximum batch size of the built engine.
 * @return: maximum batch size of the built engine.
 */
int GetMaxBatchSize() const;
/**
 * @description: get a binding's data pointer in device memory. For example, if you
 *               want to do some post-processing on the inference output directly on
 *               the gpu for efficiency, use this function to avoid extra data IO.
 * @return: pointer to device memory.
 */
void* GetBindingPtr(int bindIndex) const;
/**
 * @description: get a binding's data size in bytes; divide by sizeof(T) where T is
 *               the element type (e.g. float) to get the element count.
 * @return: size in bytes.
 */
size_t GetBindingSize(int bindIndex) const;
/**
 * @description: get a binding's dimensions.
 * @return: binding dimensions, see https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/c_api/classnvinfer1_1_1_dims.html
 */
nvinfer1::Dims GetBindingDims(int bindIndex) const;
/**
 * @description: get a binding's data type.
 * @return: binding data type, see https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/c_api/namespacenvinfer1.html#afec8200293dc7ed40aca48a763592217
 */
nvinfer1::DataType GetBindingDataType(int bindIndex) const;
// Names of all engine bindings, indexed by binding index.
std::vector<std::string> mBindingName;
protected:
// Deserialize an engine from engineFile; returns true on success.
bool DeserializeEngine(const std::string& engineFile);
// Common engine-build step shared by the model-specific builders below.
void BuildEngine(nvinfer1::IBuilder* builder,
nvinfer1::INetworkDefinition* network,
const std::vector<std::vector<float>>& calibratorData,
int maxBatchSize,
int mode);
// Build an engine by parsing a caffe prototxt/model pair.
bool BuildEngineWithCaffe(const std::string& prototxt,
const std::string& caffeModel,
const std::string& engineFile,
const std::vector<std::string>& outputBlobName,
const std::vector<std::vector<float>>& calibratorData,
int maxBatchSize);
// Build an engine by parsing an onnx model.
bool BuildEngineWithOnnx(const std::string& onnxModel,
const std::string& engineFile,
const std::vector<std::string>& customOutput,
const std::vector<std::vector<float>>& calibratorData,
int maxBatchSize);
// Build an engine by parsing a uff model.
bool BuildEngineWithUff(const std::string& uffModel,
const std::string& engineFile,
const std::vector<std::string>& inputTensorName,
const std::vector<std::vector<int>>& inputDims,
const std::vector<std::string>& outputTensorName,
const std::vector<std::vector<float>>& calibratorData,
int maxBatchSize);
/**
 * @description: initialize resources such as device memory for each binding.
 */
void InitEngine();
/**
 * @description: save the serialized engine to an engine file.
 */
void SaveEngine(const std::string& fileName);
protected:
// Logger handed to the TensorRT builder/runtime.
TrtLogger mLogger;
// tensorrt run mode 0:fp32 1:fp16 2:int8
int mRunMode;
// NOTE(review): raw owning pointers to TensorRT objects; released in ~Trt()
// (presumably) — confirm in Trt.cpp.
nvinfer1::ICudaEngine* mEngine = nullptr;
nvinfer1::IExecutionContext* mContext = nullptr;
PluginFactory* mPluginFactory;
nvinfer1::IRuntime* mRuntime = nullptr;
// Per-binding device buffers, sizes (bytes), dims and data types, all indexed
// by binding index.
std::vector<void*> mBinding;
std::vector<size_t> mBindingSize;
std::vector<nvinfer1::Dims> mBindingDims;
std::vector<nvinfer1::DataType> mBindingDataType;
// Number of input bindings; bindings [0, mInputSize) are inputs.
// TODO(review): confirm this invariant in Trt.cpp.
int mInputSize = 0;
// batch size
int mBatchSize;
};
#endif