
Guide

install

You have curl, right? Okay.

curl -L https://raw.githubusercontent.com/micha/resty/master/resty > resty

Source the script before using it.

. resty

Once resty is loaded, set the base URI of the REST host you will be making requests to.

resty http://127.0.0.1:8080/data
http://127.0.0.1:8080/data*

And now you can make some HTTP requests.

$ GET /blogs.json
[ {"id" : 1, "title" : "first post", "body" : "This is the first post"}, ... ]

$ PUT /blogs/2.json '{"id" : 2, "title" : "updated post", "body" : "This is the new."}'
{"id" : 2, "title" : "updated post", "body" : "This is the new."}

$ DELETE /blogs/2

$ POST /blogs.json '{"title" : "new post", "body" : "This is the new new."}'
{"id" : 204, "title" : "new post", "body" : "This is the new new."}

usage

source resty [-W] [remote] [OPTIONS]   # load functions into shell
resty [-v]                             # prints current request URI base
resty <remote> [OPTIONS]               # sets the base request URI

HEAD [path] [OPTIONS]                  # HEAD request
OPTIONS [path] [OPTIONS]               # OPTIONS request
GET [path] [OPTIONS]                   # GET request
DELETE [path] [OPTIONS]                # DELETE request
PUT [path] [data] [OPTIONS]            # PUT request
PATCH [path] [data] [OPTIONS]          # PATCH request
POST [path] [data] [OPTIONS]           # POST request
TRACE [path] [OPTIONS]                 # TRACE request

Options:

-Q            Don't URL encode the path.
-q <query>    Send query string with the path. A '?' is prepended to
              <query> and concatenated onto the <path>.
-W            Don't write to history file (only when sourcing script).
-V            Edit the input data interactively in 'vi'. (PUT, PATCH,
              and POST requests only, with data piped to stdin.)
-Z            Raw output. This disables any processing of HTML in the
              response.
-v            Verbose output. When used with the resty command itself
              this prints the saved curl options along with the current
              URI base. Otherwise this is passed to curl for verbose
              curl output.
--dry-run     Just output the curl command.
<curl opt>    Any curl options will be passed down to curl.

Other Tools

postman

Postman is a collaboration platform for API development. Its features simplify each step of building an API and streamline collaboration, so you can create better APIs, faster.

Download Postman-linux-x64-7.10.0.tar.gz from the Postman download page.

tar xzvf Postman-linux-x64-7.10.0.tar.gz
cd Postman
./Postman

Now we can create a free account and start playing with Postman.

chrome developer tools

Press F12, then open the Network tab.

httpbin

A simple HTTP Request & Response Service.

See httpbin.org.


History

  • 20191106: created.

Guide

code

demo.cu

#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>

bool CompareFeatureMtoN_gpu(float* featureM, float* featureN, float* result,
                            int count_m, int count_n, int size, int gpu_id) {
    float* dev_featureM = 0;
    float* dev_featureN = 0;
    float* dev_result = 0;
    const float alpha = 1, beta = 0;
    cublasHandle_t handle = nullptr; // init so the cleanup path is safe
    cudaError_t cudaStatus;

    cudaStatus = cudaSetDevice(gpu_id);
    if (cudaStatus != cudaSuccess) {
        printf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
        goto out;
    }
    cublasCreate(&handle);

    cudaStatus = cudaMalloc((void**)&dev_featureM, count_m * size * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        printf("%s, line %d, cudaMalloc failed!\n", __func__, __LINE__);
        goto out;
    }
    cudaStatus = cudaMalloc((void**)&dev_featureN, count_n * size * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        printf("%s, line %d, cudaMalloc failed!\n", __func__, __LINE__);
        goto out;
    }
    cudaStatus = cudaMalloc((void**)&dev_result, count_m * count_n * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        printf("%s, line %d, cudaMalloc failed!\n", __func__, __LINE__);
        goto out;
    }

    cudaStatus = cudaMemcpy(dev_featureM, featureM, count_m * size * sizeof(float),
                            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        printf("%s, line %d, cudaMemcpy failed!\n", __func__, __LINE__);
        goto out;
    }
    cudaStatus = cudaMemcpy(dev_featureN, featureN, count_n * size * sizeof(float),
                            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        printf("%s, line %d, cudaMemcpy failed!\n", __func__, __LINE__);
        goto out;
    }

    /*
    cuBLAS assumes that matrices on the device are stored in column-major order:

    "where alpha and beta are scalars, and A, B and C are matrices stored in
    column-major format with dimensions op(A) m x k, op(B) k x n and C m x n,
    respectively."

    // Multiply the arrays A and B on the GPU and save the result in C (column-major):
    // C(m,n) = A(m,k) * B(k,n)
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc);

    Our row-major count_m x size and count_n x size buffers are, in column-major
    terms, M^T and N^T, so the call below computes N * M^T; read back in row-major
    order, the result buffer is M * N^T (count_m x count_n).
    */
    cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, count_n, count_m, size,
                &alpha, dev_featureN, size, dev_featureM, size, &beta, dev_result, count_n);
    cudaStatus = cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated

    cudaStatus = cudaMemcpy(result, dev_result, count_m * count_n * sizeof(float),
                            cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        printf("%s, line %d, cudaMemcpy failed!\n", __func__, __LINE__);
        goto out;
    }

out:
    if (dev_featureM) cudaFree(dev_featureM);
    if (dev_featureN) cudaFree(dev_featureN);
    if (dev_result) cudaFree(dev_result);
    if (handle) cublasDestroy(handle);
    return cudaStatus == cudaSuccess;
}
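Why this works, spelled out: a row-major count_m x size buffer is, viewed column-major by cuBLAS, the matrix $M^T$ (size x count_m), and likewise featureN is $N^T$. With op(A) = T on dev_featureN and op(B) = N on dev_featureM, the call computes, in column-major terms,

$$C_{col} = (N^T)^T M^T = N\,M^T \in \mathbb{R}^{count\_n \times count\_m}.$$

Reading the result buffer back in row-major order transposes it once more, so

$$C_{row} = (N M^T)^T = M\,N^T, \qquad result[i \cdot count\_n + j] = \sum_{k=0}^{size-1} M[i][k]\, N[j][k],$$

i.e. the pairwise dot products of the feature rows of M and N.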

usage

demo.cu

#include <cstdio>
#include <vector>

void test_feature_compare()
{
    /*
    A (3 x 10)       B^T (10 x 2)      result (3 x 2)

    [a1]                               [10, 35]
    [a2]         *   [b1 b2]      =    [10, 35]
    [a3]                               [10, 35]
    */
    std::vector<float> f1{0,1,2,3,4,5,6,7,8,9};
    std::vector<float> f2{1,1,1,1,1,0,0,0,0,0}, f22{0,0,0,0,0,1,1,1,1,1};

    std::vector<std::vector<float>> A, B;
    // A: 3 x 10
    A.push_back(f1);
    A.push_back(f1);
    A.push_back(f1);

    // B: 2 x 10 (each row is one feature of dimension 10)
    B.push_back(f2);
    B.push_back(f22);

    int m = 3;
    int n = 2;
    int dim = 10;
    int gpu_id = 0;

    // flatten A and B into contiguous row-major buffers
    float* feature_m = new float[m * dim];
    float* feature_n = new float[n * dim];
    auto tmp = feature_m;
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < dim; j++)
            *tmp++ = A[i][j];
    }

    tmp = feature_n;
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < dim; j++)
            *tmp++ = B[i][j];
    }

    printf("m = %d, n = %d, size = %d \n", m, n, dim); // 3, 2, 10

    float* result = new float[m * n];
    CompareFeatureMtoN_gpu(feature_m, feature_n, result, m, n, dim, gpu_id);

    tmp = result;
    for (int i = 0; i < m * n; i++)
        printf("%f ", *(tmp++));

    delete[] feature_m;
    delete[] feature_n;
    delete[] result;
}

output

m = 3, n= 2, size= 10 
10.000000 35.000000 10.000000 35.000000 10.000000 35.000000
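As a sanity check against this output, here is a minimal CPU reference (a hypothetical helper, not part of the original demo) that computes the same row-major M * N^T product:

// CPU reference: result[i][j] = dot(featureM row i, featureN row j),
// i.e. the row-major M * N^T that the cuBLAS call produces.
void CompareFeatureMtoN_cpu(const float* featureM, const float* featureN,
                            float* result, int count_m, int count_n, int size) {
    for (int i = 0; i < count_m; i++) {
        for (int j = 0; j < count_n; j++) {
            float sum = 0.f;
            for (int k = 0; k < size; k++)
                sum += featureM[i * size + k] * featureN[j * size + k];
            result[i * count_n + j] = sum;
        }
    }
}

For the test data above it produces the same 10.0/35.0 pattern.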


History

  • 20191015: created.

Guide

cuda utils

cuda.h

#ifndef __CUDA_H_
#define __CUDA_H_

#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"

#define BLOCK 512

void check_error(cudaError_t status);

dim3 cuda_gridsize(size_t n);

float* cuda_make_array(float* x, size_t n);

void cuda_free(float* x_gpu);

void cuda_push_array(float* x_gpu, float* x, size_t n);

void cuda_pull_array(float* x_gpu, float* x, size_t n);

#endif

cuda.cpp

#include "cuda.h"
#include "blas.h"

#include <assert.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>

void error(const char* s)
{
perror(s);
assert(0);
exit(-1);
}

void check_error(cudaError_t status)
{
//cudaDeviceSynchronize();
cudaError_t status2 = cudaGetLastError();
if (status != cudaSuccess)
{
const char *s = cudaGetErrorString(status);
char buffer[256];
printf("CUDA Error: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error: %s", s);
error(buffer);
}
if (status2 != cudaSuccess)
{
const char *s = cudaGetErrorString(status);
char buffer[256];
printf("CUDA Error Prev: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error Prev: %s", s);
error(buffer);
}
}

dim3 cuda_gridsize(size_t n){
size_t k = (n-1) / BLOCK + 1;
size_t x = k;
size_t y = 1;
if(x > 65535){
x = ceil(sqrt(k));
y = (n-1)/(x*BLOCK) + 1;
}
dim3 d = {x, y, 1};
//printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
return d;
}

float* cuda_make_array(float* x,size_t n)
{
float *x_gpu;
size_t size = sizeof(float)*n;
cudaError_t status = cudaMalloc((void **)&x_gpu, size);
check_error(status);
if(x){
status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
check_error(status);
} else {
fill_gpu(n, 0, x_gpu, 1);
}
if(!x_gpu) error("Cuda malloc failed\n");
return x_gpu;
}

void cuda_free(float* x_gpu)
{
cudaError_t status = cudaFree(x_gpu);
check_error(status);
}

void cuda_push_array(float *x_gpu,float* x,size_t n)
{
size_t size = sizeof(float)*n;
cudaError_t status = cudaMemcpy(x_gpu,x,size,cudaMemcpyHostToDevice);
check_error(status);
}

void cuda_pull_array(float *x_gpu,float* x,size_t n)
{
size_t size = sizeof(float)*n;
cudaError_t status = cudaMemcpy(x,x_gpu,size,cudaMemcpyDeviceToHost);
check_error(status);
}
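A minimal usage sketch for these helpers (assuming the cuda.h above and a linked blas.h that provides fill_gpu), round-tripping a buffer host -> device -> host:

#include "cuda.h"
#include <cstdio>
#include <vector>

int main() {
    const size_t n = 1024;
    std::vector<float> host(n, 1.0f);

    float* dev = cuda_make_array(host.data(), n); // cudaMalloc + copy to device
    // ... launch kernels that read/write dev here ...
    cuda_pull_array(dev, host.data(), n);         // copy back to host
    cuda_free(dev);

    printf("host[0] = %f\n", host[0]);
    return 0;
}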

activation kernels

activations.h

#ifndef __ACTIVATIONS_H_
#define __ACTIVATIONS_H_

typedef enum {
    LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE,
    LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;

void activate_array_gpu(float* x, int n, ACTIVATION a);

#endif

activation_kernels.cu

#include "activations.h"
#include "cuda.h"
#include "blas.h"

__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001f*x;
if(x > 1) return .001f*(x-1.f) + 1.f;
return x;
}

__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}

__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
__device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01f * (x + 4);
if(x > 4) return .01f * (x - 4) + 1;
return .125f*x + .5f;
}
__device__ float stair_activate_kernel(float x)
{
int n = floorf(x);
if (n%2 == 0) return floorf(x/2);
else return (x - n) + floorf(x/2);
}

__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}

__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}

void activate_array_gpu(float *x, int n, ACTIVATION a)
{
activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a);
check_error(cudaPeekAtLastError());
}
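A small end-to-end sketch (assuming the cuda utils above) that applies LEAKY in place on the device and reads the result back:

#include "activations.h"
#include "cuda.h"
#include <cstdio>

int main() {
    float host[4] = {-2.f, -1.f, 1.f, 2.f};

    float* dev = cuda_make_array(host, 4);
    activate_array_gpu(dev, 4, LEAKY);  // x > 0 ? x : 0.1f * x
    cuda_pull_array(dev, host, 4);
    cuda_free(dev);

    for (int i = 0; i < 4; i++) printf("%f ", host[i]); // -0.2 -0.1 1.0 2.0
    return 0;
}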


History

  • 20191014: created.

Guide

caffe

load from file

enum caffe::Phase phase = caffe::Phase::TEST;

std::string proto_filepath = "yolov3.prototxt";
std::string weight_filepath = "yolov3.caffemodel";
caffe::Net<float> net(proto_filepath, phase); // Net is non-copyable; construct in place
net.CopyTrainedLayersFrom(weight_filepath);

load from stream

There is no caffe method to load a model directly from a stream.
We can override ReadProtoFromTextFile and ReadProtoFromBinaryFile in src/caffe/util/io.cpp to implement this function.

Replace

bool ReadProtoFromTextFile(const char* filename, Message* proto) {
    int fd = open(filename, O_RDONLY);
    CHECK_NE(fd, -1) << "File not found: " << filename;
    FileInputStream* input = new FileInputStream(fd);
    bool success = google::protobuf::TextFormat::Parse(input, proto);
    delete input;
    close(fd);
    return success;
}

bool ReadProtoFromBinaryFile(const char* filename, Message* proto) {
    int fd = open(filename, O_RDONLY);
    CHECK_NE(fd, -1) << "File not found: " << filename;
    ZeroCopyInputStream* raw_input = new FileInputStream(fd);
    CodedInputStream* coded_input = new CodedInputStream(raw_input);
    coded_input->SetTotalBytesLimit(kProtoReadBytesLimit, 536870912);

    bool success = proto->ParseFromCodedStream(coded_input);

    delete coded_input;
    delete raw_input;
    close(fd);
    return success;
}

load from demo.prototxt and demo.caffemodel

with

bool ReadProtoFromTextFile(const char* filename, Message* proto) {
    Encryption encryption;
    string res = encryption.decryptTextFile(filename); // demo.prototxt
    istringstream ss(res);

    IstreamInputStream* input = new IstreamInputStream(&ss);

    bool success = google::protobuf::TextFormat::Parse(input, proto);
    delete input;
    return success;
}


bool ReadProtoFromBinaryFile(const char* filename, Message* proto) {
    Encryption encryption;
    string res = encryption.decryptModelFile(filename); // demo.caffemodel
    istringstream ss(res);

    IstreamInputStream* input = new IstreamInputStream(&ss);
    CodedInputStream* coded_input = new CodedInputStream(input);
    coded_input->SetTotalBytesLimit(kProtoReadBytesLimit, 536870912);

    bool success = proto->ParseFromCodedStream(coded_input);

    delete coded_input;
    delete input;
    return success;
}

load from demo_encrypt.prototxt and demo_encrypt.caffemodel

pytorch

  • torch::jit::script::Module load(const std::string& filename, ...);
  • torch::jit::script::Module load(std::istream& in, ...);

load from file

std::string model_path = "model.libpt";
torch::jit::script::Module net = torch::jit::load(model_path);
// load() returns a Module by value and throws c10::Error on failure,
// so there is no pointer to check against nullptr

load from stream

std::string model_content = ""; // read from file
std::istringstream ss(model_content);
torch::jit::script::Module net = torch::jit::load(ss);
// as above, load() throws on failure instead of returning null
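A minimal sketch of filling such a stream from disk first (for example after decrypting the bytes in memory); the stream overload of torch::jit::load accepts any std::istream:

#include <torch/script.h>
#include <fstream>
#include <sstream>

std::ifstream file("model.libpt", std::ios::binary);
std::stringstream buffer;
buffer << file.rdbuf(); // the whole serialized module now lives in memory
torch::jit::script::Module net = torch::jit::load(buffer);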


History

  • 20191014: created.

Guide

questions

Template classes must be implemented in the header rather than in a .cpp file; otherwise you get link errors when the code is consumed as a DLL.

common solution (recommended)

Implement the template functions in the header.

ThreadPool.h

class SHARED_EXPORT ThreadPool {
public:
    static ThreadPool* Instance(size_t max_thread_pool_size);
    ~ThreadPool();

    // Add new work item to the pool.
    template<class F>
    inline void Enqueue(F f)
    {
        io_service_.post(f); // async: post() returns immediately
    }

    void Free();

private:
    static std::shared_ptr<ThreadPool> m_pInstance;
    bool bfree;

    ThreadPool(size_t size);
    DISABLE_COPY_AND_ASSIGN(ThreadPool);

    boost::thread_group workers_;
    boost::asio::io_service io_service_;
    boost::asio::io_service::work work_;
};
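A hypothetical usage sketch, assuming Instance() lazily creates the pool and Enqueue() posts work to the io_service as declared above:

ThreadPool* pool = ThreadPool::Instance(4); // pool with up to 4 worker threads

pool->Enqueue([] {
    // work item: runs later on one of the pool's worker threads
});

pool->Free();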

Separate from headers

solution 1

A common solution to this is to write the template declaration in a header file, then implement the class in an implementation file (for example .tpp), and include this implementation file at the end of the header.

Foo.h

template <typename T>
struct Foo
{
    void doSomething(T param);
};

#include "Foo.cpp" // here

Foo.cpp

template <typename T>
void Foo<T>::doSomething(T param)
{
    //implementation
}

solution 2

Another solution is to keep the implementation separated, and explicitly instantiate all the template instances you’ll need:

Foo.h

// no implementation
template <typename T> struct Foo { ... };

Foo.cpp

#include "Foo.h"

// implementation of Foo's methods

// explicit instantiations
template class Foo<int>;
template class Foo<float>;
// You will only be able to use Foo with int or float

// template void TestClass::templateFunction<int, int>(int, int);
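A short usage sketch for solution 2; linking succeeds only for the explicitly instantiated types:

// main.cpp
#include "Foo.h"

int main() {
    Foo<int> f;
    f.doSomething(42); // OK: Foo<int> was instantiated in Foo.cpp
    // Foo<char> c;    // would compile here, but fail at link time
    return 0;
}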


History

  • 20191012: created.

Guide

shared_ptr

Prior to C++17, shared_ptr could not be used to manage dynamically allocated arrays. By default, shared_ptr will call delete on the managed object when no more references remain to it. However, when you allocate using new[] you need to call delete[], and not delete, to free the resource.

In order to correctly use shared_ptr with an array, you must supply a custom deleter.

code example

// OK: pointer to a single int (999)
std::shared_ptr<int> sp(new int(999));


template<typename T>
struct array_deleter
{
    void operator()(T const* p)
    {
        delete[] p;
    }
};

// pointer to an int array:
// (1) provide an array deleter
std::shared_ptr<int> sp1(new int[10], array_deleter<int>());

// (2) or a lambda expression
std::shared_ptr<int> sp2(new int[10], [](int* p) { delete[] p; });

// (3) or use std::default_delete
std::shared_ptr<int> sp3(new int[10], std::default_delete<int[]>());

// (4) or use unique_ptr, which has an array specialization
std::unique_ptr<int[]> up(new int[10]); // this will correctly call delete[]

// (5) or use vector<int>; no deleter needed
typedef std::vector<int> int_array_t;
std::shared_ptr<int_array_t> sp4(new int_array_t(10));

std::memcpy(sp4->data(), arr, size); // arr/size: some existing source buffer

std::unique_ptr<int[]> has built-in support for arrays and will correctly call delete[].

image buffer

std::shared_ptr<uchar> pImage(new uchar[length], std::default_delete<uchar[]>());
memcpy(pImage.get(), (void*)(data.sync_image().data().c_str()), length);
cv::Mat image = cv::Mat(height, width, CV_8UC3, pImage.get());
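For completeness, since C++17 (as noted above) shared_ptr understands array types directly, so no custom deleter is needed; a minimal sketch:

// C++17: the shared_ptr<T[]> specialization calls delete[] automatically
std::shared_ptr<int[]> sp(new int[10]);
sp[0] = 42; // operator[] is provided for the array form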


History

  • 20191012: created.

Guide

include

#include <nlohmann/json.hpp>

// for convenience
using json = nlohmann::json;

compile with

-std=c++11

CMakeLists.txt

# CMakeLists.txt
find_package(nlohmann_json 3.2.0 REQUIRED)
...
add_library(foo ...)
...
target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json)

Usage

json demo

{
    "pi": 3.141,
    "happy": true,
    "name": "Niels",
    "nothing": null,
    "answer": {
        "everything": 42
    },
    "list": [1, 0, 2],
    "object": {
        "currency": "USD",
        "value": 42.99
    }
}

with code

// create an empty structure (null)
json j;

// add a number that is stored as double (note the implicit conversion of j to an object)
j["pi"] = 3.141;

// add a Boolean that is stored as bool
j["happy"] = true;

// add a string that is stored as std::string
j["name"] = "Niels";

// add another null object by passing nullptr
j["nothing"] = nullptr;

// add an object inside the object
j["answer"]["everything"] = 42;

// add an array that is stored as std::vector (using an initializer list)
j["list"] = { 1, 0, 2 };

// add another object (using an initializer list of pairs)
j["object"] = { {"currency", "USD"}, {"value", 42.99} };

// instead, you could also write (which looks very similar to the JSON above)
json j2 = {
    {"pi", 3.141},
    {"happy", true},
    {"name", "Niels"},
    {"nothing", nullptr},
    {"answer", {
        {"everything", 42}
    }},
    {"list", {1, 0, 2}},
    {"object", {
        {"currency", "USD"},
        {"value", 42.99}
    }}
};

serialization

// create object from string literal
json j = "{ \"happy\": true, \"pi\": 3.141 }"_json;

// or even nicer with a raw string literal
auto j2 = R"(
{
    "happy": true,
    "pi": 3.141
}
)"_json;

// parse explicitly
auto j3 = json::parse("{ \"happy\": true, \"pi\": 3.141 }");


// explicit conversion to string
std::string s = j.dump(); // {"happy":true,"pi":3.141}

// serialization with pretty printing
// pass in the amount of spaces to indent
std::cout << j.dump(4) << std::endl;
// {
//     "happy": true,
//     "pi": 3.141
// }

read from file/save to file

#include <fstream>
#include <iomanip> // std::setw

// read a JSON file
std::ifstream i("file.json");
json j;
i >> j;

// write prettified JSON to another file
std::ofstream o("pretty.json");
o << std::setw(4) << j << std::endl;

Arbitrary type conversions

namespace ns {
    // a simple struct to model a person
    struct person {
        std::string name;
        std::string address;
        int age;
    };
}

normal method

ns::person p = {"Ned Flanders", "744 Evergreen Terrace", 60};

// convert to JSON: copy each value into the JSON object
json j;
j["name"] = p.name;
j["address"] = p.address;
j["age"] = p.age;

// ...

// convert from JSON: copy each value from the JSON object
ns::person p2 {
    j["name"].get<std::string>(),
    j["address"].get<std::string>(),
    j["age"].get<int>()
};

better method

using nlohmann::json;

namespace ns {
    void to_json(json& j, const person& p) {
        j = json{{"name", p.name}, {"address", p.address}, {"age", p.age}};
    }

    void from_json(const json& j, person& p) {
        j.at("name").get_to(p.name);
        j.at("address").get_to(p.address);
        j.at("age").get_to(p.age);
    }
} // namespace ns


// create a person
ns::person p {"Ned Flanders", "744 Evergreen Terrace", 60};

// conversion: person -> json
json j = p;

std::cout << j << std::endl;
// {"address":"744 Evergreen Terrace","age":60,"name":"Ned Flanders"}

// conversion: json -> person
auto p2 = j.get<ns::person>();

// that's it (requires operator== for ns::person)
assert(p == p2);

That’s all! When calling the json constructor with your type, your custom to_json method will be automatically called. Likewise, when calling get<your_type>() or get_to(your_type&), the from_json method will be called.

How do I convert third-party types?

namespace nlohmann {

    template <typename T>
    struct adl_serializer {
        static void to_json(json& j, const T& value) {
            // calls the "to_json" method in T's namespace
        }

        static void from_json(const json& j, T& value) {
            // same thing, but with the "from_json" method
        }
    };

}

How can I use get() for non-default constructible/non-copyable types?

struct move_only_type {
    move_only_type() = delete;
    move_only_type(int ii): i(ii) {}
    move_only_type(const move_only_type&) = delete;
    move_only_type(move_only_type&&) = default;

    int i;
};

namespace nlohmann {
    template <>
    struct adl_serializer<move_only_type> {
        // note: the return type is no longer 'void', and the method only takes
        // one argument
        static move_only_type from_json(const json& j) {
            return {j.get<int>()};
        }

        // Here's the catch! You must provide a to_json method! Otherwise you
        // will not be able to convert move_only_type to json, since you fully
        // specialized adl_serializer on that type
        static void to_json(json& j, move_only_type t) {
            j = t.i;
        }
    };
}

examples

#pragma once

#include <string>
#include <nlohmann/json.hpp>
using json = nlohmann::json;
#include "sensor_data.h"
#include "rfid_info.h"

namespace nlohmann {
    template <>
    struct adl_serializer<SensorData> {
        // from_json returns by value and takes only the json argument
        static SensorData from_json(const json& j) {
            SensorData sensor_data;
            sensor_data.sensor_identify = j.at("SensorIdentify").get<std::string>();
            return sensor_data;
        }

        // a to_json method is still required; otherwise SensorData cannot be
        // converted to json, since adl_serializer is fully specialized on it
        static void to_json(json& j, SensorData t) {
            j = json{ {"SensorIdentify", t.sensor_identify}, {"SensorType", t.sensor_type}, {"Data", t.data} };
        }
    };

    template <>
    struct adl_serializer<RfidInfo> {
        // from_json returns by value and takes only the json argument
        static RfidInfo from_json(const json& j) {
            RfidInfo rfid_info;
            rfid_info.identify = j.at("Identify").get<std::string>();
            return rfid_info;
        }

        // a to_json method is still required, as above
        static void to_json(json& j, RfidInfo t) {
            j = json{ {"Identify", t.identify}, {"Position", 0} };
        }
    };
}

Binary formats (BSON, ...)

// create a JSON value
json j = R"({"compact": true, "schema": 0})"_json;

// serialize to BSON
std::vector<std::uint8_t> v_bson = json::to_bson(j);

// 0x1B, 0x00, 0x00, 0x00, 0x08, 0x63, 0x6F, ...

// roundtrip
json j_from_bson = json::from_bson(v_bson);
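The same round-trip pattern works for the library's other binary formats, for example CBOR and MessagePack:

// serialize to CBOR / MessagePack and round-trip back
std::vector<std::uint8_t> v_cbor = json::to_cbor(j);
json j_from_cbor = json::from_cbor(v_cbor);

std::vector<std::uint8_t> v_msgpack = json::to_msgpack(j);
json j_from_msgpack = json::from_msgpack(v_msgpack);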


History

  • 20191012: created.

Guide

introduction

Where the producer-consumer pattern is present, it is often the case that one side is faster than the other:

  • a parsing producer reads records faster than a processing consumer;
  • a disk-reading producer is faster than a network-sending consumer.

Producer and consumer often communicate through a queue: the producer puts items on the queue while the consumer pops items off it. What happens when the queue becomes full, or empty?

One approach is for the producer to try to put an item on the queue and, if it is full, yield the thread and repeat; similarly, the consumer can try to pop an item off the queue and, if it is empty, do the same. This try-fail-yield approach can unnecessarily burn CPU cycles in tight loops that constantly try to put or pop items.

Another approach is to temporarily grow the queue, but that doesn’t scale well. When do we stop growing? And once we stop we have to fall back onto the try-fail-yield method.

What if we could implement a blocking queue:

  • a queue whose put operation blocks when the queue is full, and unblocks only when another thread pops an item off the queue;
  • similarly, a queue whose pop operation blocks when the queue is empty, and unblocks only when another thread puts an item on the queue.

(Quoted from the original article.)

An example of using such a queue (with a fast producer and a slow consumer) is shown after the two implementations below.

blocking queue v1

//std
#include <queue>

//boost
#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>

namespace my {
namespace algorithm {

template<typename Data>
class SHARED_EXPORT blocking_queue
{
private:
    std::queue<Data> the_queue;
    mutable boost::mutex the_mutex;
    boost::condition_variable the_condition_variable;

public:
    void push(Data const& data)
    {
        boost::mutex::scoped_lock lock(the_mutex);
        the_queue.push(data);
        lock.unlock();
        the_condition_variable.notify_one();
    }

    bool empty() const
    {
        boost::mutex::scoped_lock lock(the_mutex);
        return the_queue.empty();
    }

    size_t size() const
    {
        boost::mutex::scoped_lock lock(the_mutex);
        return the_queue.size();
    }

    bool try_pop(Data& popped_value)
    {
        boost::mutex::scoped_lock lock(the_mutex);
        if (the_queue.empty())
        {
            return false;
        }

        popped_value = the_queue.front();
        the_queue.pop();
        return true;
    }

    void wait_and_pop(Data& popped_value)
    {
        boost::mutex::scoped_lock lock(the_mutex);
        while (the_queue.empty())
        {
            the_condition_variable.wait(lock);
        }

        popped_value = the_queue.front();
        the_queue.pop();
    }

    void signal_exit()
    {
        Data data;
        push(data); // push a default-constructed item to wake a waiting consumer
    }

};

}
} // end namespace

blocking queue v2


#pragma once
#include <iostream>
#include <assert.h>

#include <queue>
#include <mutex>
#include <condition_variable>

#define MAX_CAPACITY 20

namespace my {
namespace algorithm {

template<typename T>
class SHARED_EXPORT BlockingQueue
{
public:
    BlockingQueue()
        : mtx(), full_(), empty_(), capacity_(MAX_CAPACITY) { }

    void Push(const T& data){
        std::unique_lock<std::mutex> lock(mtx);
        while(queue_.size() == capacity_){
            full_.wait(lock); // wait until a consumer frees a slot
        }

        assert(queue_.size() < capacity_);
        queue_.push(data);
        empty_.notify_all();
    }

    T Pop(){
        std::unique_lock<std::mutex> lock(mtx);
        while(queue_.empty()){
            empty_.wait(lock); // wait until a producer adds an item
        }

        assert(!queue_.empty());
        T front(queue_.front());
        queue_.pop();
        full_.notify_all();
        return front;
    }

    T Front(){
        std::unique_lock<std::mutex> lock(mtx);
        while(queue_.empty()){
            empty_.wait(lock);
        }

        assert(!queue_.empty());
        T front(queue_.front());
        return front;
    }

    T Back(){
        std::unique_lock<std::mutex> lock(mtx);
        while(queue_.empty()){
            empty_.wait(lock);
        }

        assert(!queue_.empty());
        T back(queue_.back());
        return back;
    }

    size_t Size(){
        std::lock_guard<std::mutex> lock(mtx);
        return queue_.size();
    }

    bool Empty(){
        std::unique_lock<std::mutex> lock(mtx);
        return queue_.empty();
    }

    void SetCapacity(const size_t capacity){
        capacity_ = (capacity > 0 ? capacity : MAX_CAPACITY);
    }

private:
    //DISABLE_COPY_AND_ASSIGN(BlockingQueue);
    BlockingQueue(const BlockingQueue& rhs);
    BlockingQueue& operator=(const BlockingQueue& rhs);

private:
    mutable std::mutex mtx;
    std::condition_variable full_;
    std::condition_variable empty_;
    std::queue<T> queue_;
    size_t capacity_;
};


}
} // end namespace
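The promised fast-producer/slow-consumer example, as a minimal sketch using BlockingQueue (assuming SHARED_EXPORT expands to nothing when building an application):

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
    my::algorithm::BlockingQueue<int> queue; // capacity MAX_CAPACITY (20)

    std::thread producer([&] {
        for (int i = 0; i < 100; i++)
            queue.Push(i); // blocks while the queue is full
    });
    std::thread consumer([&] {
        for (int i = 0; i < 100; i++) {
            int item = queue.Pop(); // blocks while the queue is empty
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            printf("consumed %d\n", item);
        }
    });

    producer.join();
    consumer.join();
    return 0;
}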


History

  • 20191012: created.

go tutorial

versions:

  • go: 1.13.1

install

wget https://dl.google.com/go/go1.13.1.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.13.1.linux-amd64.tar.gz

ll /usr/local/go

vim ~/.bashrc
export PATH=$PATH:/usr/local/go/bin

source ~/.bashrc

zsh uses the env profile ~/.zshrc; bash uses ~/.bashrc.

check version

go version
go version go1.13.1 linux/amd64

uninstall

just delete /usr/local/go

set GOPATH

Create your workspace directory, $HOME/go.

The GOPATH environment variable specifies the location of your workspace. If no GOPATH is set, it is assumed to be $HOME/go on Unix systems.

Note that GOPATH must not be the same path as your Go installation.

issue the commands

vim .bashrc 
# for golang
export GOPATH=$HOME/go
export PATH=/usr/local/go/bin:$GOPATH/bin:$PATH

source .bashrc

#go env -w GOPATH=$HOME/go

$ echo $GOPATH
/home/kezunlin/go

$ go env GOPATH
/home/kezunlin/go

code organization

  • Go programmers typically keep all their Go code in a single workspace.
  • A workspace contains many version control repositories (managed by Git, for example).
  • Each repository contains one or more packages.
  • Each package consists of one or more Go source files in a single directory.
  • The path to a package’s directory determines its import path.

like this

bin/
    hello                          # command executable
    outyet                         # command executable
src/
    github.com/golang/example/
        .git/                      # Git repository metadata
        hello/
            hello.go               # command source
        outyet/
            main.go                # command source
            main_test.go           # test source
        stringutil/
            reverse.go             # package source
            reverse_test.go        # test source
    golang.org/x/image/
        .git/                      # Git repository metadata
        bmp/
            reader.go              # package source
            writer.go              # package source
    ... (many more repositories and packages omitted) ...

Note that symbolic links should not be used to link files or directories into your workspace.

An import path is a string that uniquely identifies a package.

go example

your first program

mkdir -p $GOPATH/src/github.com/kezunlin/hello
cd $GOPATH/src/github.com/kezunlin/hello
vim hello.go

hello.go

package main

import "fmt"

func main() {
    fmt.Printf("hello, world\n")
}

build and run

go build
./hello
hello, world

install and clean binary files

# install hello to $HOME/go/bin
go install

# clean $HOME/go/bin/*
go clean -i

~/go/src$ go build github.com/kezunlin/hello/
~/go/src$ go install github.com/kezunlin/hello/

your first library

stringutil library

mkdir -p $GOPATH/src/github.com/kezunlin/stringutil
cd $GOPATH/src/github.com/kezunlin/stringutil
vim reverse.go

reverse.go

// Package stringutil contains utility functions for working with strings.
package stringutil

// Reverse returns its argument string reversed rune-wise left to right.
func Reverse(s string) string {
    r := []rune(s)
    for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
        r[i], r[j] = r[j], r[i]
    }
    return string(r)
}

Every Go source file starts with a package clause, package <name>, where name is the package's default name for imports. (All files in a package must use the same name.) Executable commands must always use package main.

build library

go build github.com/kezunlin/stringutil
#This won't produce an output file. Instead it saves
#the compiled package in the local build cache.

use stringutil in hello.go

package main

import (
    "fmt"
    "github.com/kezunlin/stringutil"
)

func main() {
    fmt.Printf("hello, world\n")
    fmt.Println(stringutil.Reverse("!oG ,olleH"))
}

build and install

go build github.com/kezunlin/hello
go install github.com/kezunlin/hello

~/go/bin$ ./hello
hello, world
Hello, Go!

folder structure

tree .
.
├── bin
│   └── hello
└── src
    └── github.com
        └── kezunlin
            ├── hello
            │   └── hello.go
            └── stringutil
                └── reverse.go

6 directories, 3 files

testing

You write a test by creating a file with a name ending in _test.go that contains functions named TestXXX with signature func (t *testing.T). The test framework runs each such function; if the function calls a failure function such as t.Error or t.Fail, the test is considered to have failed.

  • file name: xxx_test.go
  • function name: TestXXX
  • error: t.Error or t.Fail

reverse_test.go

package stringutil

import "testing"

func TestReverse(t *testing.T) {
    cases := []struct {
        in, want string
    }{
        {"Hello, world", "dlrow ,olleH"},
        {"Hello, 世界", "界世 ,olleH"},
        {"", ""},
    }
    for _, c := range cases {
        got := Reverse(c.in)
        if got != c.want {
            t.Errorf("Reverse(%q) == %q, want %q", c.in, got, c.want)
        }
    }
}

test ok

$ go test github.com/kezunlin/stringutil
ok github.com/kezunlin/stringutil 0.165s

test error

--- FAIL: TestReverse (0.00s)
    reverse_test.go:16: Reverse("Hello, 世界2") == "2界世 ,olleH", want "界世 ,olleH"
FAIL
exit status 1
FAIL    github.com/kezunlin/stringutil  0.003s    

remote packages

$ go get github.com/golang/example/hello
$ $GOPATH/bin/hello
Hello, Go examples!

go commands

go help gopath 
go help importpath
go help test

go build
go install
go clean

go get # fetch,build and install


History

  • 20191011: created.

Guide

install

sudo wget -O /usr/include/colorwheel.h https://raw.githubusercontent.com/Totoditoto/colorwheel/master/colorwheel.h

This installs the header to /usr/include/colorwheel.h.

usage

only include header

#include <colorwheel.h>

macros

/* Predefined printf styled messages */
CW_PRINT_NORMAL(message, ...);
CW_PRINT_ALARM(message, ...);
CW_PRINT_CRITICAL(message, ...);
CW_PRINT_FAULT(message, ...);
CW_PRINT_VALID(message, ...);
CW_PRINT_INFO(message, ...);

/* Predefined trace styled messages (indicates file, function and line) */
CW_TRACE_NORMAL(message, ...);
CW_TRACE_ALARM(message, ...);
CW_TRACE_CRITICAL(message, ...);
CW_TRACE_FAULT(message, ...);
CW_TRACE_VALID(message, ...);
CW_TRACE_INFO(message, ...);

code

#include <colorwheel.h>

void demo_colorwheel(void)
{
    CW_TRACE_NORMAL("This is a normal information");
    CW_TRACE_INFO("This is a noticeable information");
    CW_TRACE_FAULT("Ooops something might have gone wrong");
    CW_TRACE_VALID("Finally it's okay, don't worry");
    //CW_TRACE_NORMAL("Nevermind");
    int value = 10;
    CW_TRACE_CRITICAL("It was way worse than expected! Computer will explode in %d s", value);
    CW_TRACE_ALARM("IT IS TOO LATE RUN AWAY");
}

colored print


History

  • 20191010: created.