
Series

Guide

method 1

$ git clone https://github.com/JetsonHacksNano/installVSCode.git
$ cd installVSCode
$ ./installVSCode.sh

Get:1 https://packagecloud.io/headmelted/codebuilds/debian stretch/main arm64 code-oss arm64 1.42.0-1575969886 [57.0 MB]

code-oss 1.42.0

method 2

Download code-oss_1.32.3-arm64.deb and install it:

$ sudo dpkg -i code-oss_1.32.3-arm64.deb

code-oss 1.32.3

Start VSCode

# check version
$ code-oss --version
1.42.0
d01e111fb0e6e27378fcd049bbc9b36d0833d78e
arm64

# start vscode on nano
$ code-oss

History

  • 2019/12/20: created.

Series

Guide

  • Host: Ubuntu 16.04/18.04, VS Code Insiders
  • Target: ARM64 Jetson Nano, Ubuntu 18.04

To use Remote-SSH with a Linux arm64/aarch64 platform such as the NVIDIA
Jetson TX1/TX2/Nano, we have to install VS Code Insiders instead of the regular VS Code.

The steps are as follows:

  1. Download code-insiders_1.42.0-1576733727_amd64.deb from here.

  2. Install VS Code Insiders:

    $ sudo dpkg -i code-insiders_1.42.0-1576733727_amd64.deb

  3. Start VS Code Insiders:

    $ code-insiders

  4. Install the Remote-SSH extension for VS Code Insiders and restart.

  5. Now we can use VS Code Insiders to connect to Linux arm64/aarch64 platforms such as the NVIDIA Jetson TX1/TX2/Nano.

check vscode version

  • code 1.40.1
  • code-insiders 1.42.0
# vscode 
$ code --version
1.40.1
8795a9889db74563ddd43eb0a897a2384129a619
x64

# vscode insiders
$ code-insiders --version
1.42.0-insider
e74405d11443c5361c31e2bc341866d146eee206
x64

History

  • 2019/12/20: created.

How to get alexa rank

xml demo

content = """
<ALEXA VER="0.9" URL="kezunlin.me/" HOME="0" AID="=" IDN="kezunlin.me/">
<RLS PREFIX="http://" more="0"> </RLS>
<SD TITLE="A" FLAGS="" HOST="kezunlin.me"> </SD>
<SD>
<POPULARITY URL="kezunlin.me/" TEXT="2489312" SOURCE="panel"/>
<REACH RANK="2141483"/>
<RANK DELTA="-1406486"/>
</SD>
</ALEXA>
"""

from xml.etree import ElementTree

root = ElementTree.fromstring(content)
# every element exposes obj.tag and obj.attrib

for name, value in root.attrib.items():
    print('{0}="{1}"'.format(name, value))
for child in root:
    print(child.tag, child.attrib)
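
A minimal sketch of looking the POPULARITY element up by tag instead of by position (reusing the content string above), which is less brittle if the XML layout shifts:

# find the first SD/POPULARITY element by tag path
pop = root.find("SD/POPULARITY")
if pop is not None:            # find() returns None when the element is missing
    print(pop.attrib["TEXT"])  # -> 2489312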

pandas demo

import pandas as pd

def demo_create_dataframe_1():
    # initialize a list of lists
    data = [['tom', 10], ['nick', 15], ['juli', 14]]

    # create the pandas DataFrame
    df = pd.DataFrame(data, columns=['Name', 'Age'])

    # print the dataframe
    print(df)

def demo_create_dataframe_2():
    # initialize a dict of lists
    data = {'Name': ['Tom', 'nick', 'krish', 'jack'], 'Age': [20, 21, 19, 18]}

    # create the DataFrame
    df = pd.DataFrame(data)

    # print the output
    print(df)

def demo_append_dataframe():
    # create the first DataFrame from a dictionary
    df1 = pd.DataFrame({"a": [1, 2, 3, 4],
                        "b": [5, 6, 7, 8]})

    # create the second DataFrame from a dictionary
    df2 = pd.DataFrame({"a": [1, 2, 3],
                        "b": [5, 6, 7]})

    # print df1
    print(df1, "\n")

    # append df2 at the end of df1
    new_df = df1.append(df2, ignore_index=True)  # reset the index
    print(new_df)

def demo_pandas_index():
    data = {'Name': ['Tom', 'nick', 'krish', 'jack'], 'Age': [20, 21, 19, 18]}
    df = pd.DataFrame(data)

    # case 1: save without the index
    df.to_csv('data.csv', index=False)
    df = pd.read_csv('data.csv')

    # case 2: save with the index (the default), then drop it when reading back
    df.to_csv('data.csv', index=True)
    df_new = pd.read_csv('data.csv').drop(['Unnamed: 0'], axis=1)
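
Note that DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; on newer pandas the same append is written with pd.concat. A minimal sketch:

import pandas as pd

df1 = pd.DataFrame({"a": [1, 2], "b": [5, 6]})
df2 = pd.DataFrame({"a": [3], "b": [7]})

# equivalent to df1.append(df2, ignore_index=True) on older pandas
new_df = pd.concat([df1, df2], ignore_index=True)
print(new_df)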

get rank and save to csv

#!/usr/bin/python
# -*- coding: UTF-8 -*-

import os
import datetime
import pandas as pd
import requests
from xml.etree import ElementTree

def get_alexa_rank(site="kezunlin.me"):
    url = "http://data.alexa.com/data?cli=10&dat=snbamz&url={}".format(site)
    #print(url)
    r = requests.get(url)
    rank = 0
    if r.status_code == 200:
        #print(r.content)
        root = ElementTree.fromstring(r.content)
        rank = int(root[2][0].attrib["TEXT"])  # second SD, first child: POPULARITY
    return rank

"""
<ALEXA VER="0.9" URL="kezunlin.me/" HOME="0" AID="=" IDN="kezunlin.me/">
<RLS PREFIX="http://" more="0"> </RLS>
<SD TITLE="A" FLAGS="" HOST="kezunlin.me"> </SD>
<SD>
<POPULARITY URL="kezunlin.me/" TEXT="2489312" SOURCE="panel"/>
<REACH RANK="2141483"/>
<RANK DELTA="-1406486"/>
</SD>
</ALEXA>
"""

def save_to_csv(date_str, rank, csv_filepath='rank.csv'):
    # load the existing df1
    df1 = pd.read_csv(csv_filepath)
    #print(df1)

    # create df2
    data = {'Date': [date_str], 'Rank': [rank]}
    df2 = pd.DataFrame(data)
    #print(df2)

    # append
    df = df1.append(df2, ignore_index=True)  # reset the index

    # save the new df to csv
    df.to_csv(csv_filepath, index=False, sep=',', encoding='utf-8')
    #print(df)
    print("Appending to {}".format(csv_filepath))

def main():
    # get rank
    rank = get_alexa_rank("kezunlin.me")
    date = datetime.datetime.now()
    date_str = date.strftime('%Y-%m-%d')
    print("date = {} , rank = {}".format(date_str, rank))

    save_to_csv(date_str, rank)

if __name__ == "__main__":
    main()

output

date = 2019-12-16 , rank = 2486318
Appending to rank.csv

view results

$ cat rank.csv 
Date,Rank
2019-12-13,2489312
2019-12-16,2486318
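
To inspect the collected history, the CSV can be loaded back with pandas. A minimal sketch, assuming the Date,Rank layout shown above:

import pandas as pd

df = pd.read_csv("rank.csv", parse_dates=["Date"])
df["Delta"] = df["Rank"].diff()  # day-to-day change; negative means the rank improved
print(df)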

Reference

History

  • 2019/12/16: created.

Regex Online Demo

regex online demo

Code Example

#!/usr/bin/python
# -*- coding: UTF-8 -*-

import os
import re

import shutil
import requests

def regrex_demo():
    pattern = r'https://img201[7-9].cnblogs.com/blog/\d{6}/20\d{4}/\d{6}-\d{,}-\d{,}.png'
    string = """![flops](https://kezunlin.me/images/posts/635233-20190912095826925-710547982.png)
hello world
![flops](https://kezunlin.me/images/posts/635233-20190912095826925-710547982.png)"""
    results = re.findall(pattern, string)  # list
    for url in results:
        print(url)

def get_filepaths(root_dir):
    filepaths = []
    for filename in os.listdir(root_dir):
        filepath = os.path.sep.join([root_dir, filename])
        filepaths.append(filepath)
    return filepaths

def makesure_dir(dir):
    if not os.path.exists(dir):
        os.makedirs(dir)

def find_cnblog_image_urls(filepath):
    # img2017 img2018 img2019
    # images2017 images2018 images2019
    #pattern = r'https*://img201[0-9].cnblogs.com/blog/\d{6,8}/20\d{4}/\d{6,8}-\d{,}-\d{,}.png'
    #pattern = r'https*://img201[0-9].cnblogs.com/blog/\d{6,8}/20\d{4}/\d{6,8}-\d{,}-\d{,}.jpg'

    #pattern = r'https*://images201[0-9].cnblogs.com/blog/\d{6,8}/20\d{4}/\d{6,8}-\d{,}-\d{,}.png'
    pattern = r'https*://images201[0-9].cnblogs.com/blog/\d{6,8}/20\d{4}/\d{6,8}-\d{,}-\d{,}.jpg'

    urls = []
    with open(filepath, "r") as f:
        contents = f.read().replace('\n', '')  # read the file into one string
        urls = re.findall(pattern, contents)  # list
    return urls

def download_image(url, to_file):
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(to_file, 'wb') as f:
            #r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        print("Save to ", to_file)

def download_image_by_chunk(url, to_file):
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(to_file, 'wb') as f:
            for chunk in r.iter_content(1024):
                f.write(chunk)
        print("Save to ", to_file)

def replace_inplace(filepath, old_string, new_string):
    f = open(filepath, 'r')
    filedata = f.read()
    f.close()

    newdata = filedata.replace(old_string, new_string)

    f = open(filepath, 'w')
    f.write(newdata)
    f.close()

def download_image_wrapper(url, to_dir):
    """
    download the image from url and return the new url
    """
    filename = url.split("/")[-1]
    to_file = os.path.sep.join([to_dir, filename])
    download_image(url, to_file)

    new_url = "https://kezunlin.me/images/posts/{}".format(filename)
    return new_url

def process_all_posts():
    to_dir = "images/posts"  # images dir
    makesure_dir(to_dir)

    #posts_dir = "../source/_posts/"
    posts_dir = "test_posts"
    posts_dir = "_posts"
    filepaths = get_filepaths(posts_dir)
    for filepath in filepaths:
        print("=" * 20)
        print(filepath)
        urls = find_cnblog_image_urls(filepath)
        for url in urls:
            new_url = download_image_wrapper(url, to_dir)
            replace_inplace(filepath, url, new_url)

def main():
    #regrex_demo()
    process_all_posts()

if __name__ == "__main__":
    main()

"""
grep -r "cnblogs.com/blog" source/_posts
"""

Reference

History

  • 2019/12/13: created.

Series

Guide

Jetson Family

  • Jetson TX1 Developer Kit
  • Jetson TX2 Developer Kit
  • Jetson AGX Xavier Developer Kit
  • Jetson Nano Developer Kit

SDKs and Tools

  • NVIDIA JetPack
  • NVIDIA DeepStream SDK
  • NVIDIA DIGITS for training

JetPack includes:

  • Full desktop Linux with NVIDIA drivers
  • AI and Computer Vision libraries and APIs
  • Developer tools
  • Documentation and sample code

Training GPU:

  • Maxwell, Pascal, Volta, or Turing-based GPU (ideally with at least 6 GB of video memory); optionally, an AWS P2/P3 instance or a Microsoft Azure N-series VM
  • Ubuntu 16.04/18.04 x86_64

Deployment:

  • Jetson Nano Developer Kit with JetPack 4.2 or newer (Ubuntu 18.04 aarch64).
  • Jetson Xavier Developer Kit with JetPack 4.0 or newer (Ubuntu 18.04 aarch64)
  • Jetson TX2 Developer Kit with JetPack 3.0 or newer (Ubuntu 16.04 aarch64).
  • Jetson TX1 Developer Kit with JetPack 2.3 or newer (Ubuntu 16.04 aarch64).

Jetson Nano Developer Kit

Jetson Nano Device

Jetson Nano was introduced in April 2019 for only $99.

jetson nano
Jetson Nano image

  1. microSD card slot for main storage
  2. 40-pin expansion header
  3. Micro-USB port for 5V power input or for data
  4. Gigabit Ethernet port
  5. USB 3.0 ports (x4)
  6. HDMI output port
  7. DisplayPort connector
  8. DC Barrel jack for 5V power input
  9. MIPI CSI camera connector

  • power input: parts 3 and 8
  • camera: part 9 (MIPI CSI camera)
  • the green LED (D53) close to the micro-USB port should turn green when powered

inference performance
Jetson Nano inference performance

multiple cameras with jetson nano

Write Image to the microSD Card

  1. Download the jetson-nano-sd-card-image-r3223.zip
  2. Format the microSD card as exFAT if it is 64 GB or larger, and as FAT32 if it is smaller.
  3. Use Etcher or the Linux command line to write the image to the microSD card.

    The image is about 5 GB, so be patient while downloading.
    Etcher is the recommended tool for writing the image to the microSD card.

Linux commands to write the image to the microSD card

$ df -h 
Filesystem Size Used Avail Use% Mounted on
udev 7.8G 0 7.8G 0% /dev
tmpfs 1.6G 18M 1.6G 2% /run
/dev/sdb6 184G 162G 13G 93% /
tmpfs 7.8G 71M 7.8G 1% /dev/shm
tmpfs 5.0M 4.0K 5.0M 1% /run/lock
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/sdb5 453M 157M 270M 37% /boot
tmpfs 1.6G 56K 1.6G 1% /run/user/1000
/dev/sdb4 388G 337G 52G 87% /media/kezunlin/Workspace
/dev/sdc1 30G 32K 30G 1% /media/kezunlin/nano


$ dmesg | tail | awk '$3 == "sd" {print}'

#In this example, we can see the 32GB microSD card was assigned /dev/sdc:
[ 613.537818] sd 4:0:0:0: Attached scsi generic sg2 type 0
[ 613.940079] sd 4:0:0:0: [sdc] 62333952 512-byte logical blocks: (31.9 GB/29.7 GiB)
[ 613.940664] sd 4:0:0:0: [sdc] Write Protect is off
[ 613.940672] sd 4:0:0:0: [sdc] Mode Sense: 87 00 00 00
[ 613.942730] sd 4:0:0:0: [sdc] Write cache: disabled, read cache: enabled, doesnt support DPO or FUA
[ 613.956666] sd 4:0:0:0: [sdc] Attached SCSI removable disk

# Use this command to write the zipped SD card image to the microSD card:

$ /usr/bin/unzip -p ~/Downloads/jetson-nano-sd-card-image-r3223.zip | sudo /bin/dd of=/dev/sdc bs=1M status=progress

0+167548 records in
0+167548 records out
12884901888 bytes (13 GB, 12 GiB) copied, 511.602 s, 25.2 MB/s


# note: the writing process generates 12 partitions
$ sudo fdisk -l

GPT PMBR size mismatch (25165823 != 62333951) will be corrected by w(rite).
Disk /dev/sde: 29.7 GiB, 31914983424 bytes, 62333952 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: E696E264-F2EA-434A-900C-D9ACA2F99E43

Device Start End Sectors Size Type
/dev/sde1 24576 25165790 25141215 12G Linux filesystem
/dev/sde2 2048 2303 256 128K Linux filesystem
/dev/sde3 4096 4991 896 448K Linux filesystem
/dev/sde4 6144 7295 1152 576K Linux filesystem
/dev/sde5 8192 8319 128 64K Linux filesystem
/dev/sde6 10240 10623 384 192K Linux filesystem
/dev/sde7 12288 13439 1152 576K Linux filesystem
/dev/sde8 14336 14463 128 64K Linux filesystem
/dev/sde9 16384 17663 1280 640K Linux filesystem
/dev/sde10 18432 19327 896 448K Linux filesystem
/dev/sde11 20480 20735 256 128K Linux filesystem
/dev/sde12 22528 22687 160 80K Linux filesystem

Partition table entries are not in disk order.


# When the dd command finishes, eject the disk device from the command line:
$ sudo eject /dev/sdc

# Physically remove microSD card from the computer.

Steps:

  1. Insert the microSD card into the appropriate slot
  2. Connect the display, USB keyboard/mouse, and Ethernet cable.
  3. Depending on the power supply you want to use, you may have to add or remove the jumper for power selection:
    – If using the DC barrel jack (part 8), the jumper must be set.
    – If using micro-USB (part 3), the jumper must be off.
  4. Plug in the power supply. The green LED (D53) close to the micro-USB port should turn green, and the display should show the NVIDIA logo before booting begins.

Prepare Nano System


  • Jetson Nano L4T 32.2.1-20190812212815 (JetPack 4.2.2)
  • nv-jetson-nano-sd-card-image-r32.2.1.zip
  • DeepStream SDK 4.0.1 (gstreamer1.0)

  • Jetson Nano L4T 32.3-20191217 (JetPack 4.3)
  • nv-jetson-nano-sd-card-image-r32.3.1.zip
  • DeepStream SDK 4.0.2 (gstreamer1.0)

  • Ubuntu 18.04 aarch64 (bionic)
  • CUDA 10.0 SM_72 (installed)
  • TensorRT-5.1.6.0 (installed)
  • OpenCV 3.3.1 (installed)
  • Python 2.7 (installed)
  • Python 3.6.9
  • Numpy 1.13.3
  • QT 5.9.5

CUDA 10.0 and TensorRT 5.1.6.0 come preinstalled on the Jetson Nano.
The Jetson Nano, like the Raspberry Pi, is an ARM64 (aarch64) machine.

DeepStream SDK 4.0.1 requires the installation of JetPack 4.2.2.
DeepStream SDK 4.0.2 requires the installation of JetPack 4.3.

SSH for nano with ForwardX11

for nano, edit /etc/ssh/sshd_config (X11Forwarding is a server-side option)

X11Forwarding yes

and restart ssh

$ sudo /etc/init.d/ssh restart
[sudo] password for nano:
[ ok ] Restarting ssh (via systemctl): ssh.service.

for my Ubuntu client

edit ~/.ssh/config

Host nano
    HostName 192.168.0.63
    User nano
    ForwardX11 yes

connect with ForwardX11 enabled

# method 1: `~/.ssh/config` with `ForwardX11 yes`
$ ssh nano@192.168.0.63

# method 2: with `-X`
$ ssh -X nano@192.168.0.63
# `-X` enables ForwardX11

add CUDA envs

edit ~/.bashrc

# Add this to your .bashrc file

export CUDA_HOME=/usr/local/cuda
# Adds the CUDA compiler to the PATH
export PATH=$CUDA_HOME/bin:$PATH
# Adds the libraries
export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH

check cuda version

$ source ~/.bashrc
$ nvcc --version

nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2019 NVIDIA Corporation
Built on Mon_Mar_11_22:13:24_CDT_2019
Cuda compilation tools, release 10.0, V10.0.326

check versions

$ uname -a 
Linux nano-desktop 4.9.140-tegra #1 SMP PREEMPT Sat Oct 19 15:54:06 PDT 2019 aarch64 aarch64 aarch64 GNU/Linux

$ dpkg-query --show nvidia-l4t-core
nvidia-l4t-core 32.2.1-20190812212815

$ python --version
Python 2.7.15+

$ git --version
git version 2.17.1

# check tensorrt version
$ ll -al /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.5
lrwxrwxrwx 1 root root 26 Jun 5 2019 /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.5 -> libnvinfer_plugin.so.5.1.6

There is NO NEED to download the TensorRT 5.1.5.0 GA tar package for Ubuntu 18.04 and CUDA 10.0 from here and place it at /opt/TensorRT-5.1.5.0.
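
The TensorRT version can also be confirmed through the Python bindings that JetPack ships; a minimal sketch, assuming the python3-libnvinfer package is present:

import tensorrt as trt  # provided by JetPack, not by pip
print(trt.__version__)  # expect a 5.1.6.x version on JetPack 4.2.2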

install packages

  • cmake 3.10.2
  • cmake-gui 3.10.2
  • python 3.6.9
  • QT 5.9.5

use the official Ubuntu sources
do not replace /etc/apt/sources.list with the Aliyun mirror; otherwise lots of packages will fail to install

# update
$ sudo apt-get update

# install cmake
$ sudo apt-get install cmake

Get:1 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 cmake arm64 3.10.2-1ubuntu2.18.04.1 [2,971 kB]

# install cmake-gui
$ sudo apt-get install cmake-gui cmake-qt-gui

Get:1 http://ports.ubuntu.com/ubuntu-ports bionic-updates/universe arm64 cmake-qt-gui arm64 3.10.2-1ubuntu2.18.04.1 [1,527 kB]


# install python 3.6.9
$ sudo apt -y install libpython3-dev python3-numpy

Get:1 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 libpython3.6 arm64 3.6.9-1~18.04 [1,307 kB]
Get:2 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 python3.6 arm64 3.6.9-1~18.04 [203 kB]
Get:3 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 libpython3.6-stdlib arm64 3.6.9-1~18.04 [1,609 kB]
Get:4 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 python3.6-minimal arm64 3.6.9-1~18.04 [1,327 kB]
Get:5 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 libpython3.6-minimal arm64 3.6.9-1~18.04 [528 kB]
Get:6 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 libpython3.6-dev arm64 3.6.9-1~18.04 [45.0 MB]
Get:7 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 libpython3-dev arm64 3.6.7-1~18.04 [7,328 B]
Get:8 http://ports.ubuntu.com/ubuntu-ports bionic/main arm64 python3-numpy arm64 1:1.13.3-2ubuntu1 [1,734 kB]

# install qt5
$ sudo apt-get install qtbase5-dev

Get:1 http://ports.ubuntu.com/ubuntu-ports bionic-updates/main arm64 libqt5core5a arm64 5.9.5+dfsg-0ubuntu2.4 [1933 kB]

packages summary

sudo apt -y install cmake cmake-gui cmake-qt-gui
sudo apt -y install libpython3-dev python3-numpy python3-pip
sudo apt -y install qtbase5-dev

# other packages for c++ programs
sudo apt -y install libcrypto++-dev
sudo apt -y install libgoogle-glog-dev
sudo apt -y install libgflags-dev
sudo apt -y install --no-install-recommends libboost-all-dev

# top and jtop
sudo -H pip3 install jetson-stats
sudo jtop -h

Build jetson-inference

see jetson-inference

cmake and configure

git clone --recursive https://github.com/dusty-nv/jetson-inference
cd jetson-inference
git submodule update --init
mkdir build
cd build
sudo cmake ..

configure

do not download the models
do not download PyTorch

run the model download and PyTorch install scripts later if you really need them:

$ cd jetson-inference/tools
$ ./download-models.sh

$ cd jetson-inference/build
$ ./install-pytorch.sh

Or download the models from here:
wget -b -c https://github.com/dusty-nv/jetson-inference/releases/download/model-mirror-190618/ResNet-18.tar.gz

configure output
cmake-gui jetson-inference

[jetson-inference]  Checking for 'dialog' deb package...installed
[jetson-inference] FOUND_DIALOG=INSTALLED

[jetson-inference] Model selection status: 0
[jetson-inference] No models were selected for download.

[jetson-inference] to run this tool again, use the following commands:
$ cd <jetson-inference>/tools
$ ./download-models.sh

[jetson-inference] Checking for 'dialog' deb package...installed
[jetson-inference] FOUND_DIALOG=INSTALLED

head: cannot open '/etc/nv_tegra_release' for reading: No such file or directory
[jetson-inference] reading L4T version from "dpkg-query --show nvidia-l4t-core"
[jetson-inference] Jetson BSP Version: L4T R32.2

[jetson-inference] Package selection status: 1
[jetson-inference] Package selection cancelled.

[jetson-inference] installation complete, exiting with status code 0
[jetson-inference] to run this tool again, use the following commands:
$ cd <jetson-inference>/build
$ ./install-pytorch.sh

[Pre-build] Finished CMakePreBuild script

-- Finished installing dependencies
-- using patched FindCUDA.cmake
Looking for pthread.h
Looking for pthread.h - found
Looking for pthread_create
Looking for pthread_create - not found
Looking for pthread_create in pthreads
Looking for pthread_create in pthreads - not found
Looking for pthread_create in pthread
Looking for pthread_create in pthread - found
Found Threads: TRUE

-- using patched FindCUDA.cmake
-- CUDA version: 10.0
-- CUDA 10 detected, enabling SM_72
-- OpenCV version: 3.3.1
-- OpenCV version >= 3.0.0, enabling OpenCV
-- system arch: aarch64
-- output path: /home/nano/git/jetson-inference/build/aarch64
-- Copying /home/nano/git/jetson-inference/c/detectNet.h
-- Copying /home/nano/git/jetson-inference/c/homographyNet.h
-- Copying /home/nano/git/jetson-inference/c/imageNet.h
-- Copying /home/nano/git/jetson-inference/c/segNet.h
-- Copying /home/nano/git/jetson-inference/c/superResNet.h
-- Copying /home/nano/git/jetson-inference/c/tensorNet.h
-- Copying /home/nano/git/jetson-inference/c/imageNet.cuh
-- Copying /home/nano/git/jetson-inference/calibration/randInt8Calibrator.h
Could NOT find Doxygen (missing: DOXYGEN_EXECUTABLE)
-- found Qt5Widgets version: 5.9.5
-- found Qt5Widgets defines: -DQT_WIDGETS_LIB;-DQT_GUI_LIB;-DQT_CORE_LIB
-- found Qt5Widgets library: Qt5::Widgets
-- found Qt5Widgets include: /usr/include/aarch64-linux-gnu/qt5/;/usr/include/aarch64-linux-gnu/qt5/QtWidgets;/usr/include/aarch64-linux-gnu/qt5/QtGui;/usr/include/aarch64-linux-gnu/qt5/QtCore;/usr/lib/aarch64-linux-gnu/qt5//mkspecs/linux-g++
-- camera-capture: building as submodule, /home/nano/git/jetson-inference/tools
-- jetson-utils: building as submodule, /home/nano/git/jetson-inference
-- Copying /home/nano/git/jetson-inference/utils/XML.h
-- Copying /home/nano/git/jetson-inference/utils/commandLine.h
-- Copying /home/nano/git/jetson-inference/utils/filesystem.h
-- Copying /home/nano/git/jetson-inference/utils/mat33.h
-- Copying /home/nano/git/jetson-inference/utils/pi.h
-- Copying /home/nano/git/jetson-inference/utils/rand.h
-- Copying /home/nano/git/jetson-inference/utils/timespec.h
-- Copying /home/nano/git/jetson-inference/utils/camera/gstCamera.h
-- Copying /home/nano/git/jetson-inference/utils/camera/v4l2Camera.h
-- Copying /home/nano/git/jetson-inference/utils/codec/gstDecoder.h
-- Copying /home/nano/git/jetson-inference/utils/codec/gstEncoder.h
-- Copying /home/nano/git/jetson-inference/utils/codec/gstUtility.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaFont.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaMappedMemory.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaNormalize.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaOverlay.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaRGB.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaResize.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaUtility.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaWarp.h
-- Copying /home/nano/git/jetson-inference/utils/cuda/cudaYUV.h
-- Copying /home/nano/git/jetson-inference/utils/display/glDisplay.h
-- Copying /home/nano/git/jetson-inference/utils/display/glTexture.h
-- Copying /home/nano/git/jetson-inference/utils/display/glUtility.h
-- Copying /home/nano/git/jetson-inference/utils/image/imageIO.h
-- Copying /home/nano/git/jetson-inference/utils/image/loadImage.h
-- Copying /home/nano/git/jetson-inference/utils/input/devInput.h
-- Copying /home/nano/git/jetson-inference/utils/input/devJoystick.h
-- Copying /home/nano/git/jetson-inference/utils/input/devKeyboard.h
-- Copying /home/nano/git/jetson-inference/utils/network/Endian.h
-- Copying /home/nano/git/jetson-inference/utils/network/IPv4.h
-- Copying /home/nano/git/jetson-inference/utils/network/NetworkAdapter.h
-- Copying /home/nano/git/jetson-inference/utils/network/Socket.h
-- Copying /home/nano/git/jetson-inference/utils/threads/Event.h
-- Copying /home/nano/git/jetson-inference/utils/threads/Mutex.h
-- Copying /home/nano/git/jetson-inference/utils/threads/Process.h
-- Copying /home/nano/git/jetson-inference/utils/threads/Thread.h
-- trying to build Python bindings for Python versions: 2.7;3.6;3.7
-- detecting Python 2.7...
-- found Python version: 2.7 (2.7.15+)
-- found Python include: /usr/include/python2.7
-- found Python library: /usr/lib/aarch64-linux-gnu/libpython2.7.so
-- CMake module path: /home/nano/git/jetson-inference/utils/cuda;/home/nano/git/jetson-inference/utils/python/bindings
NumPy ver. 1.13.3 found (include: /usr/lib/python2.7/dist-packages/numpy/core/include)
-- found NumPy version: 1.13.3
-- found NumPy include: /usr/lib/python2.7/dist-packages/numpy/core/include
-- detecting Python 3.6...
-- found Python version: 3.6 (3.6.9)
-- found Python include: /usr/include/python3.6m
-- found Python library: /usr/lib/aarch64-linux-gnu/libpython3.6m.so
-- CMake module path: /home/nano/git/jetson-inference/utils/cuda;/home/nano/git/jetson-inference/utils/python/bindings
NumPy ver. 1.13.3 found (include: /usr/lib/python3/dist-packages/numpy/core/include)
-- found NumPy version: 1.13.3
-- found NumPy include: /usr/lib/python3/dist-packages/numpy/core/include
-- detecting Python 3.7...
-- Python 3.7 wasn't found
-- Copying /home/nano/git/jetson-inference/utils/python/examples/camera-viewer.py
-- Copying /home/nano/git/jetson-inference/utils/python/examples/cuda-from-numpy.py
-- Copying /home/nano/git/jetson-inference/utils/python/examples/cuda-to-numpy.py
-- Copying /home/nano/git/jetson-inference/utils/python/examples/gl-display-test.py
-- trying to build Python bindings for Python versions: 2.7;3.6;3.7
-- detecting Python 2.7...
-- found Python version: 2.7 (2.7.15+)
-- found Python include: /usr/include/python2.7
-- found Python library: /usr/lib/aarch64-linux-gnu/libpython2.7.so
-- detecting Python 3.6...
-- found Python version: 3.6 (3.6.9)
-- found Python include: /usr/include/python3.6m
-- found Python library: /usr/lib/aarch64-linux-gnu/libpython3.6m.so
-- detecting Python 3.7...
-- Python 3.7 wasn't found
-- Copying /home/nano/git/jetson-inference/python/examples/detectnet-camera.py
-- Copying /home/nano/git/jetson-inference/python/examples/detectnet-console.py
-- Copying /home/nano/git/jetson-inference/python/examples/imagenet-camera.py
-- Copying /home/nano/git/jetson-inference/python/examples/imagenet-console.py
-- Copying /home/nano/git/jetson-inference/python/examples/my-detection.py
-- Copying /home/nano/git/jetson-inference/python/examples/my-recognition.py
-- Copying /home/nano/git/jetson-inference/python/examples/segnet-batch.py
-- Copying /home/nano/git/jetson-inference/python/examples/segnet-camera.py
-- Copying /home/nano/git/jetson-inference/python/examples/segnet-console.py
Configuring done

compile and install

generate and compile

sudo make

install jetson-inference

sudo make install
sudo ldconfig

output

[  1%] Linking CXX shared library ../aarch64/lib/libjetson-utils.so
[ 31%] Built target jetson-utils
[ 32%] Linking CXX shared library aarch64/lib/libjetson-inference.so
[ 43%] Built target jetson-inference
[ 44%] Linking CXX executable ../../aarch64/bin/imagenet-console
[ 45%] Built target imagenet-console
[ 46%] Linking CXX executable ../../aarch64/bin/imagenet-camera
[ 47%] Built target imagenet-camera
[ 47%] Linking CXX executable ../../aarch64/bin/detectnet-console
[ 48%] Built target detectnet-console
[ 49%] Linking CXX executable ../../aarch64/bin/detectnet-camera
[ 50%] Built target detectnet-camera
[ 50%] Linking CXX executable ../../aarch64/bin/segnet-console
[ 51%] Built target segnet-console
[ 52%] Linking CXX executable ../../aarch64/bin/segnet-camera
[ 53%] Built target segnet-camera
[ 54%] Linking CXX executable ../../aarch64/bin/superres-console
[ 55%] Built target superres-console
[ 56%] Linking CXX executable ../../aarch64/bin/homography-console
[ 57%] Built target homography-console
[ 58%] Linking CXX executable ../../aarch64/bin/homography-camera
[ 59%] Built target homography-camera
[ 60%] Automatic MOC for target camera-capture
[ 60%] Built target camera-capture_autogen
[ 61%] Linking CXX executable ../../aarch64/bin/camera-capture
[ 64%] Built target camera-capture
[ 65%] Linking CXX executable ../../aarch64/bin/trt-bench
[ 66%] Built target trt-bench
[ 67%] Linking CXX executable ../../aarch64/bin/trt-console
[ 68%] Built target trt-console
[ 69%] Linking CXX executable ../../../aarch64/bin/camera-viewer
[ 70%] Built target camera-viewer
[ 71%] Linking CXX executable ../../../aarch64/bin/v4l2-console
[ 72%] Built target v4l2-console
[ 73%] Linking CXX executable ../../../aarch64/bin/v4l2-display
[ 74%] Built target v4l2-display
[ 75%] Linking CXX executable ../../../aarch64/bin/gl-display-test
[ 76%] Built target gl-display-test
[ 76%] Linking CXX shared library ../../../aarch64/lib/python/2.7/jetson_utils_python.so
[ 82%] Built target jetson-utils-python-27
[ 83%] Linking CXX shared library ../../../aarch64/lib/python/3.6/jetson_utils_python.so
[ 89%] Built target jetson-utils-python-36
[ 90%] Linking CXX shared library ../../aarch64/lib/python/2.7/jetson_inference_python.so
[ 95%] Built target jetson-inference-python-27
[ 96%] Linking CXX shared library ../../aarch64/lib/python/3.6/jetson_inference_python.so
[100%] Built target jetson-inference-python-36
Install the project...
-- Install configuration: ""
-- Installing: /usr/local/include/jetson-inference/detectNet.h
-- Installing: /usr/local/include/jetson-inference/homographyNet.h
-- Installing: /usr/local/include/jetson-inference/imageNet.h
-- Installing: /usr/local/include/jetson-inference/segNet.h
-- Installing: /usr/local/include/jetson-inference/superResNet.h
-- Installing: /usr/local/include/jetson-inference/tensorNet.h
-- Installing: /usr/local/include/jetson-inference/imageNet.cuh
-- Installing: /usr/local/include/jetson-inference/randInt8Calibrator.h
-- Installing: /usr/local/lib/libjetson-inference.so
-- Set runtime path of "/usr/local/lib/libjetson-inference.so" to ""
-- Installing: /usr/local/share/jetson-inference/cmake/jetson-inferenceConfig.cmake
-- Installing: /usr/local/share/jetson-inference/cmake/jetson-inferenceConfig-noconfig.cmake
-- Installing: /usr/local/bin/imagenet-console
-- Set runtime path of "/usr/local/bin/imagenet-console" to ""
-- Installing: /usr/local/bin/imagenet-camera
-- Set runtime path of "/usr/local/bin/imagenet-camera" to ""
-- Installing: /usr/local/bin/detectnet-console
-- Set runtime path of "/usr/local/bin/detectnet-console" to ""
-- Installing: /usr/local/bin/detectnet-camera
-- Set runtime path of "/usr/local/bin/detectnet-camera" to ""
-- Installing: /usr/local/bin/segnet-console
-- Set runtime path of "/usr/local/bin/segnet-console" to ""
-- Installing: /usr/local/bin/segnet-camera
-- Set runtime path of "/usr/local/bin/segnet-camera" to ""
-- Installing: /usr/local/bin/superres-console
-- Set runtime path of "/usr/local/bin/superres-console" to ""
-- Installing: /usr/local/bin/homography-console
-- Set runtime path of "/usr/local/bin/homography-console" to ""
-- Installing: /usr/local/bin/homography-camera
-- Set runtime path of "/usr/local/bin/homography-camera" to ""
-- Installing: /usr/local/bin/camera-capture
-- Set runtime path of "/usr/local/bin/camera-capture" to ""
-- Installing: /usr/local/include/jetson-utils/XML.h
-- Installing: /usr/local/include/jetson-utils/commandLine.h
-- Installing: /usr/local/include/jetson-utils/filesystem.h
-- Installing: /usr/local/include/jetson-utils/mat33.h
-- Installing: /usr/local/include/jetson-utils/pi.h
-- Installing: /usr/local/include/jetson-utils/rand.h
-- Installing: /usr/local/include/jetson-utils/timespec.h
-- Installing: /usr/local/include/jetson-utils/gstCamera.h
-- Installing: /usr/local/include/jetson-utils/v4l2Camera.h
-- Installing: /usr/local/include/jetson-utils/gstDecoder.h
-- Installing: /usr/local/include/jetson-utils/gstEncoder.h
-- Installing: /usr/local/include/jetson-utils/gstUtility.h
-- Installing: /usr/local/include/jetson-utils/cudaFont.h
-- Installing: /usr/local/include/jetson-utils/cudaMappedMemory.h
-- Installing: /usr/local/include/jetson-utils/cudaNormalize.h
-- Installing: /usr/local/include/jetson-utils/cudaOverlay.h
-- Installing: /usr/local/include/jetson-utils/cudaRGB.h
-- Installing: /usr/local/include/jetson-utils/cudaResize.h
-- Installing: /usr/local/include/jetson-utils/cudaUtility.h
-- Installing: /usr/local/include/jetson-utils/cudaWarp.h
-- Installing: /usr/local/include/jetson-utils/cudaYUV.h
-- Installing: /usr/local/include/jetson-utils/glDisplay.h
-- Installing: /usr/local/include/jetson-utils/glTexture.h
-- Installing: /usr/local/include/jetson-utils/glUtility.h
-- Installing: /usr/local/include/jetson-utils/imageIO.h
-- Installing: /usr/local/include/jetson-utils/loadImage.h
-- Installing: /usr/local/include/jetson-utils/devInput.h
-- Installing: /usr/local/include/jetson-utils/devJoystick.h
-- Installing: /usr/local/include/jetson-utils/devKeyboard.h
-- Installing: /usr/local/include/jetson-utils/Endian.h
-- Installing: /usr/local/include/jetson-utils/IPv4.h
-- Installing: /usr/local/include/jetson-utils/NetworkAdapter.h
-- Installing: /usr/local/include/jetson-utils/Socket.h
-- Installing: /usr/local/include/jetson-utils/Event.h
-- Installing: /usr/local/include/jetson-utils/Mutex.h
-- Installing: /usr/local/include/jetson-utils/Process.h
-- Installing: /usr/local/include/jetson-utils/Thread.h
-- Installing: /usr/local/lib/libjetson-utils.so
-- Installing: /usr/local/share/jetson-utils/cmake/jetson-utilsConfig.cmake
-- Installing: /usr/local/share/jetson-utils/cmake/jetson-utilsConfig-noconfig.cmake
-- Installing: /usr/local/bin/camera-viewer
-- Set runtime path of "/usr/local/bin/camera-viewer" to ""
-- Installing: /usr/local/bin/gl-display-test
-- Set runtime path of "/usr/local/bin/gl-display-test" to ""
-- Installing: /usr/local/bin/camera-viewer.py
-- Installing: /usr/local/bin/cuda-from-numpy.py
-- Installing: /usr/local/bin/cuda-to-numpy.py
-- Installing: /usr/local/bin/gl-display-test.py
-- Installing: /usr/lib/python2.7/dist-packages/jetson_utils_python.so
-- Set runtime path of "/usr/lib/python2.7/dist-packages/jetson_utils_python.so" to ""
-- Installing: /usr/lib/python2.7/dist-packages/Jetson
-- Installing: /usr/lib/python2.7/dist-packages/Jetson/Utils
-- Installing: /usr/lib/python2.7/dist-packages/Jetson/Utils/__init__.py
-- Installing: /usr/lib/python2.7/dist-packages/Jetson/__init__.py
-- Installing: /usr/lib/python2.7/dist-packages/jetson
-- Installing: /usr/lib/python2.7/dist-packages/jetson/utils
-- Installing: /usr/lib/python2.7/dist-packages/jetson/utils/__init__.py
-- Installing: /usr/lib/python2.7/dist-packages/jetson/__init__.py
-- Installing: /usr/lib/python3.6/dist-packages/jetson_utils_python.so
-- Set runtime path of "/usr/lib/python3.6/dist-packages/jetson_utils_python.so" to ""
-- Installing: /usr/lib/python3.6/dist-packages/Jetson
-- Installing: /usr/lib/python3.6/dist-packages/Jetson/Utils
-- Installing: /usr/lib/python3.6/dist-packages/Jetson/Utils/__init__.py
-- Installing: /usr/lib/python3.6/dist-packages/Jetson/__init__.py
-- Installing: /usr/lib/python3.6/dist-packages/jetson
-- Installing: /usr/lib/python3.6/dist-packages/jetson/utils
-- Installing: /usr/lib/python3.6/dist-packages/jetson/utils/__init__.py
-- Installing: /usr/lib/python3.6/dist-packages/jetson/__init__.py
-- Installing: /usr/local/bin/detectnet-camera.py
-- Installing: /usr/local/bin/detectnet-console.py
-- Installing: /usr/local/bin/imagenet-camera.py
-- Installing: /usr/local/bin/imagenet-console.py
-- Installing: /usr/local/bin/my-detection.py
-- Installing: /usr/local/bin/my-recognition.py
-- Installing: /usr/local/bin/segnet-batch.py
-- Installing: /usr/local/bin/segnet-camera.py
-- Installing: /usr/local/bin/segnet-console.py
-- Installing: /usr/lib/python2.7/dist-packages/jetson_inference_python.so
-- Set runtime path of "/usr/lib/python2.7/dist-packages/jetson_inference_python.so" to ""
-- Up-to-date: /usr/lib/python2.7/dist-packages/Jetson
-- Installing: /usr/lib/python2.7/dist-packages/Jetson/__init__.py
-- Installing: /usr/lib/python2.7/dist-packages/Jetson/Inference
-- Installing: /usr/lib/python2.7/dist-packages/Jetson/Inference/__init__.py
-- Up-to-date: /usr/lib/python2.7/dist-packages/jetson
-- Installing: /usr/lib/python2.7/dist-packages/jetson/__init__.py
-- Installing: /usr/lib/python2.7/dist-packages/jetson/inference
-- Installing: /usr/lib/python2.7/dist-packages/jetson/inference/__init__.py
-- Installing: /usr/lib/python3.6/dist-packages/jetson_inference_python.so
-- Set runtime path of "/usr/lib/python3.6/dist-packages/jetson_inference_python.so" to ""
-- Up-to-date: /usr/lib/python3.6/dist-packages/Jetson
-- Installing: /usr/lib/python3.6/dist-packages/Jetson/__init__.py
-- Installing: /usr/lib/python3.6/dist-packages/Jetson/Inference
-- Installing: /usr/lib/python3.6/dist-packages/Jetson/Inference/__init__.py
-- Up-to-date: /usr/lib/python3.6/dist-packages/jetson
-- Installing: /usr/lib/python3.6/dist-packages/jetson/__init__.py
-- Installing: /usr/lib/python3.6/dist-packages/jetson/inference
-- Installing: /usr/lib/python3.6/dist-packages/jetson/inference/__init__.py

The project will be built to jetson-inference/build/aarch64, with the following directory structure:

|-build
  \aarch64
      \bin             where the sample binaries are built to
        \networks     where the network models are stored
        \images       where the test images are stored
      \include         where the headers reside
      \lib             where the libraries are built to

These also get installed under /usr/local/
The Python bindings for the jetson.inference and jetson.utils modules also get installed under /usr/lib/python*/dist-packages/.
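
As a quick sanity check that the bindings landed where Python can see them (a minimal sketch; works with both python and python3):

# importing proves the dist-packages install above succeeded
import jetson.inference
import jetson.utils

print(jetson.inference.__file__)
print(jetson.utils.__file__)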

view libjetson-utils and libjetson-inference in lib

$ tree build/aarch64/lib
.
├── libjetson-inference.so
├── libjetson-utils.so
└── python
├── 2.7
│   ├── jetson_inference_python.so
│   └── jetson_utils_python.so
└── 3.6
├── jetson_inference_python.so
└── jetson_utils_python.so

3 directories, 6 files

libjetson-inference.so

$ ldd libjetson-inference.so 

linux-vdso.so.1 (0x0000007fa6e8c000)
libpthread.so.0 => /lib/aarch64-linux-gnu/libpthread.so.0 (0x0000007fa6cf3000)
libdl.so.2 => /lib/aarch64-linux-gnu/libdl.so.2 (0x0000007fa6cde000)
librt.so.1 => /lib/aarch64-linux-gnu/librt.so.1 (0x0000007fa6cc7000)
libjetson-utils.so => /home/nano/git/jetson-inference/build/aarch64/lib/libjetson-utils.so (0x0000007fa6b6f000)
libnvinfer.so.5 => /usr/lib/aarch64-linux-gnu/libnvinfer.so.5 (0x0000007f9dc23000)
libnvinfer_plugin.so.5 => /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.5 (0x0000007f9d94d000)
libnvparsers.so.5 => /usr/lib/aarch64-linux-gnu/libnvparsers.so.5 (0x0000007f9d60e000)
libnvonnxparser.so.0 => /usr/lib/aarch64-linux-gnu/libnvonnxparser.so.0 (0x0000007f9d1ea000)
libopencv_calib3d.so.3.3 => /usr/lib/libopencv_calib3d.so.3.3 (0x0000007f9d0be000)
libopencv_core.so.3.3 => /usr/lib/libopencv_core.so.3.3 (0x0000007f9cde9000)
libstdc++.so.6 => /usr/lib/aarch64-linux-gnu/libstdc++.so.6 (0x0000007f9cc56000)
libm.so.6 => /lib/aarch64-linux-gnu/libm.so.6 (0x0000007f9cb9c000)
libgcc_s.so.1 => /lib/aarch64-linux-gnu/libgcc_s.so.1 (0x0000007f9cb78000)
libc.so.6 => /lib/aarch64-linux-gnu/libc.so.6 (0x0000007f9ca1f000)
/lib/ld-linux-aarch64.so.1 (0x0000007fa6e61000)
libGL.so.1 => /usr/lib/aarch64-linux-gnu/libGL.so.1 (0x0000007f9c920000)
libGLEW.so.2.0 => /usr/lib/aarch64-linux-gnu/libGLEW.so.2.0 (0x0000007f9c874000)
libgstreamer-1.0.so.0 => /usr/lib/aarch64-linux-gnu/libgstreamer-1.0.so.0 (0x0000007f9c744000)
libgstapp-1.0.so.0 => /usr/lib/aarch64-linux-gnu/libgstapp-1.0.so.0 (0x0000007f9c726000)
libcudnn.so.7 => /usr/lib/aarch64-linux-gnu/libcudnn.so.7 (0x0000007f858c0000)
libcublas.so.10.0 => /usr/local/cuda-10.0/targets/aarch64-linux/lib/libcublas.so.10.0 (0x0000007f7ff59000)
libcudart.so.10.0 => /usr/local/cuda-10.0/targets/aarch64-linux/lib/libcudart.so.10.0 (0x0000007f7fee8000)
libopencv_flann.so.3.3 => /usr/lib/libopencv_flann.so.3.3 (0x0000007f7fe85000)
libopencv_imgproc.so.3.3 => /usr/lib/libopencv_imgproc.so.3.3 (0x0000007f7f6b8000)

imageNet demo

C++

$ cd jetson-inference/build/aarch64/bin
$ sudo ./imagenet-console --network=resnet-18 images/orange_0.jpg output_0.jpg

output

imageNet -- loading classification network model from:
-- prototxt networks/ResNet-18/deploy.prototxt
-- model networks/ResNet-18/ResNet-18.caffemodel
-- class_labels networks/ilsvrc12_synset_words.txt
-- input_blob 'data'
-- output_blob 'prob'
-- batch_size 1

[TRT] TensorRT version 5.1.6
[TRT] loading NVIDIA plugins...
[TRT] Plugin Creator registration succeeded - GridAnchor_TRT
[TRT] Plugin Creator registration succeeded - NMS_TRT
[TRT] Plugin Creator registration succeeded - Reorg_TRT
[TRT] Plugin Creator registration succeeded - Region_TRT
[TRT] Plugin Creator registration succeeded - Clip_TRT
[TRT] Plugin Creator registration succeeded - LReLU_TRT
[TRT] Plugin Creator registration succeeded - PriorBox_TRT
[TRT] Plugin Creator registration succeeded - Normalize_TRT
[TRT] Plugin Creator registration succeeded - RPROI_TRT
[TRT] Plugin Creator registration succeeded - BatchedNMS_TRT
[TRT] completed loading NVIDIA plugins.
[TRT] detected model format - caffe (extension '.caffemodel')
[TRT] desired precision specified for GPU: FASTEST
[TRT] requested fasted precision for device GPU without providing valid calibrator, disabling INT8
[TRT] native precisions detected for GPU: FP32, FP16
[TRT] selecting fastest native precision for GPU: FP16
[TRT] attempting to open engine cache file networks/ResNet-18/ResNet-18.caffemodel.1.1.GPU.FP16.engine
[TRT] cache file not found, profiling network model on device GPU
[TRT] device GPU, loading networks/ResNet-18/deploy.prototxt networks/ResNet-18/ResNet-18.caffemodel
[TRT] retrieved Output tensor "prob": 1000x1x1
[TRT] retrieved Input tensor "data": 3x224x224
[TRT] device GPU, configuring CUDA engine
[TRT] device GPU, building FP16: ON
[TRT] device GPU, building INT8: OFF
[TRT] device GPU, building CUDA engine (this may take a few minutes the first time a network is loaded)
[TRT] device GPU, completed building CUDA engine
[TRT] network profiling complete, writing engine cache to networks/ResNet-18/ResNet-18.caffemodel.1.1.GPU.FP16.engine
[TRT] device GPU, completed writing engine cache to networks/ResNet-18/ResNet-18.caffemodel.1.1.GPU.FP16.engine
[TRT] device GPU, networks/ResNet-18/ResNet-18.caffemodel loaded
[TRT] device GPU, CUDA engine context initialized with 2 bindings
[TRT] binding -- index 0
-- name 'data'
-- type FP32
-- in/out INPUT
-- # dims 3
-- dim #0 3 (CHANNEL)
-- dim #1 224 (SPATIAL)
-- dim #2 224 (SPATIAL)
[TRT] binding -- index 1
-- name 'prob'
-- type FP32
-- in/out OUTPUT
-- # dims 3
-- dim #0 1000 (CHANNEL)
-- dim #1 1 (SPATIAL)
-- dim #2 1 (SPATIAL)
[TRT] binding to input 0 data binding index: 0
[TRT] binding to input 0 data dims (b=1 c=3 h=224 w=224) size=602112
[TRT] binding to output 0 prob binding index: 1
[TRT] binding to output 0 prob dims (b=1 c=1000 h=1 w=1) size=4000
device GPU, networks/ResNet-18/ResNet-18.caffemodel initialized.
[TRT] networks/ResNet-18/ResNet-18.caffemodel loaded
imageNet -- loaded 1000 class info entries
networks/ResNet-18/ResNet-18.caffemodel initialized.
[image] loaded 'images/orange_0.jpg' (1920 x 1920, 3 channels)
class 0950 - 0.996028 (orange)
imagenet-console: 'images/orange_0.jpg' -> 99.60276% class #950 (orange)

[TRT] ------------------------------------------------
[TRT] Timing Report networks/ResNet-18/ResNet-18.caffemodel
[TRT] ------------------------------------------------
[TRT] Pre-Process CPU 0.10824ms CUDA 0.34156ms
[TRT] Network CPU 12.91854ms CUDA 12.47026ms
[TRT] Post-Process CPU 0.80311ms CUDA 0.82672ms
[TRT] Total CPU 13.82989ms CUDA 13.63854ms
[TRT] ------------------------------------------------

[TRT] note -- when processing a single image, run 'sudo jetson_clocks' before
to disable DVFS for more accurate profiling/timing measurements

imagenet-console: attempting to save output image to 'output_0.jpg'
imagenet-console: completed saving 'output_0.jpg'
imagenet-console: shutting down...
imagenet-console: shutdown complete

Python

$ cd jetson-inference/build/aarch64/bin
$ sudo ./imagenet-console.py --network=resnet-18 images/orange_0.jpg output_0.jpg

output

jetson.inference.__init__.py
jetson.inference -- initializing Python 2.7 bindings...
jetson.inference -- registering module types...
jetson.inference -- done registering module types
jetson.inference -- done Python 2.7 binding initialization
jetson.utils.__init__.py
jetson.utils -- initializing Python 2.7 bindings...
jetson.utils -- registering module functions...
jetson.utils -- done registering module functions
jetson.utils -- registering module types...
jetson.utils -- done registering module types
jetson.utils -- done Python 2.7 binding initialization
[image] loaded 'images/orange_0.jpg' (1920 x 1920, 3 channels)
jetson.inference -- PyTensorNet_New()
jetson.inference -- PyImageNet_Init()
jetson.inference -- imageNet loading network using argv command line params
jetson.inference -- imageNet.__init__() argv[0] = './imagenet-console.py'
jetson.inference -- imageNet.__init__() argv[1] = '--network=resnet-18'
jetson.inference -- imageNet.__init__() argv[2] = 'images/orange_0.jpg'
jetson.inference -- imageNet.__init__() argv[3] = 'output_0.jpg'

imageNet -- loading classification network model from:
-- prototxt networks/ResNet-18/deploy.prototxt
-- model networks/ResNet-18/ResNet-18.caffemodel
-- class_labels networks/ilsvrc12_synset_words.txt
-- input_blob 'data'
-- output_blob 'prob'
-- batch_size 1

[TRT] TensorRT version 5.1.6
[TRT] loading NVIDIA plugins...
[TRT] Plugin Creator registration succeeded - GridAnchor_TRT
[TRT] Plugin Creator registration succeeded - NMS_TRT
[TRT] Plugin Creator registration succeeded - Reorg_TRT
[TRT] Plugin Creator registration succeeded - Region_TRT
[TRT] Plugin Creator registration succeeded - Clip_TRT
[TRT] Plugin Creator registration succeeded - LReLU_TRT
[TRT] Plugin Creator registration succeeded - PriorBox_TRT
[TRT] Plugin Creator registration succeeded - Normalize_TRT
[TRT] Plugin Creator registration succeeded - RPROI_TRT
[TRT] Plugin Creator registration succeeded - BatchedNMS_TRT
[TRT] completed loading NVIDIA plugins.
[TRT] detected model format - caffe (extension '.caffemodel')
[TRT] desired precision specified for GPU: FASTEST
[TRT] requested fasted precision for device GPU without providing valid calibrator, disabling INT8
[TRT] native precisions detected for GPU: FP32, FP16
[TRT] selecting fastest native precision for GPU: FP16
[TRT] attempting to open engine cache file networks/ResNet-18/ResNet-18.caffemodel.1.1.GPU.FP16.engine
[TRT] loading network profile from engine cache... networks/ResNet-18/ResNet-18.caffemodel.1.1.GPU.FP16.engine
[TRT] device GPU, networks/ResNet-18/ResNet-18.caffemodel loaded
[TRT] device GPU, CUDA engine context initialized with 2 bindings
[TRT] binding -- index 0
-- name 'data'
-- type FP32
-- in/out INPUT
-- # dims 3
-- dim #0 3 (CHANNEL)
-- dim #1 224 (SPATIAL)
-- dim #2 224 (SPATIAL)
[TRT] binding -- index 1
-- name 'prob'
-- type FP32
-- in/out OUTPUT
-- # dims 3
-- dim #0 1000 (CHANNEL)
-- dim #1 1 (SPATIAL)
-- dim #2 1 (SPATIAL)
[TRT] binding to input 0 data binding index: 0
[TRT] binding to input 0 data dims (b=1 c=3 h=224 w=224) size=602112
[TRT] binding to output 0 prob binding index: 1
[TRT] binding to output 0 prob dims (b=1 c=1000 h=1 w=1) size=4000
device GPU, networks/ResNet-18/ResNet-18.caffemodel initialized.
[TRT] networks/ResNet-18/ResNet-18.caffemodel loaded
imageNet -- loaded 1000 class info entries
networks/ResNet-18/ResNet-18.caffemodel initialized.
class 0950 - 0.996028 (orange)
image is recognized as 'orange' (class #950) with 99.602759% confidence


[TRT] ------------------------------------------------
[TRT] Timing Report networks/ResNet-18/ResNet-18.caffemodel
[TRT] ------------------------------------------------
[TRT] Pre-Process CPU 0.06884ms CUDA 0.32849ms
[TRT] Network CPU 11.44888ms CUDA 11.01536ms
[TRT] Post-Process CPU 0.20783ms CUDA 0.20708ms
[TRT] Total CPU 11.72555ms CUDA 11.55094ms
[TRT] ------------------------------------------------

[TRT] note -- when processing a single image, run 'sudo jetson_clocks' before
to disable DVFS for more accurate profiling/timing measurements

jetson.utils -- PyFont_New()
jetson.utils -- PyFont_Init()
jetson.utils -- PyFont_Dealloc()
jetson.utils -- freeing CUDA mapped memory
PyTensorNet_Dealloc()

my-recognition

# build
$ cd jetson-inference/examples/my-recognition
$ mkdir build && cd build && cmake-gui ..

my-recognition cmake-gui

# compile 
$ make

Scanning dependencies of target my-recognition
[ 50%] Building CXX object CMakeFiles/my-recognition.dir/my-recognition.cpp.o
[100%] Linking CXX executable my-recognition
[100%] Built target my-recognition

# view libraries
$ ldd my-recognition
linux-vdso.so.1 (0x0000007fb5546000)
libjetson-inference.so => /usr/local/lib/libjetson-inference.so (0x0000007fb53ea000)
libjetson-utils.so => /usr/local/lib/libjetson-utils.so (0x0000007fb5292000)
libstdc++.so.6 => /usr/lib/aarch64-linux-gnu/libstdc++.so.6 (0x0000007fb50db000)
libc.so.6 => /lib/aarch64-linux-gnu/libc.so.6 (0x0000007fb4f82000)
/lib/ld-linux-aarch64.so.1 (0x0000007fb551b000)
libpthread.so.0 => /lib/aarch64-linux-gnu/libpthread.so.0 (0x0000007fb4f56000)
libdl.so.2 => /lib/aarch64-linux-gnu/libdl.so.2 (0x0000007fb4f41000)
librt.so.1 => /lib/aarch64-linux-gnu/librt.so.1 (0x0000007fb4f2a000)
libnvinfer.so.5 => /usr/lib/aarch64-linux-gnu/libnvinfer.so.5 (0x0000007fabfde000)
libnvinfer_plugin.so.5 => /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.5 (0x0000007fabd08000)
libnvparsers.so.5 => /usr/lib/aarch64-linux-gnu/libnvparsers.so.5 (0x0000007fab9c9000)
libnvonnxparser.so.0 => /usr/lib/aarch64-linux-gnu/libnvonnxparser.so.0 (0x0000007fab5a5000)
libopencv_calib3d.so.3.3 => /usr/lib/libopencv_calib3d.so.3.3 (0x0000007fab479000)
libopencv_core.so.3.3 => /usr/lib/libopencv_core.so.3.3 (0x0000007fab1a4000)

run and get result

$ ./build/my-recognition polar_bear.jpg

class 0296 - 0.997434 (ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus)
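
The same classification can be done from Python with the bindings built earlier. A minimal sketch along the lines of the bundled imagenet-console.py (the image filename is an example):

import jetson.inference
import jetson.utils

# load the network; the TensorRT engine is built and cached on first use
net = jetson.inference.imageNet("resnet-18")

# load the image into shared CPU/GPU memory
img, width, height = jetson.utils.loadImageRGBA("polar_bear.jpg")

class_idx, confidence = net.Classify(img, width, height)
print("class #{} ({}) with {:.2f}% confidence".format(
    class_idx, net.GetClassDesc(class_idx), confidence * 100))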

imagenet-camera

imagenet-camera usage

$ ./imagenet-camera --help
usage: imagenet-camera [-h] [--network NETWORK] [--camera CAMERA]
[--width WIDTH] [--height HEIGHT]

Classify a live camera stream using an image recognition DNN.

optional arguments:
--help show this help message and exit
--network NETWORK pre-trained model to load (see below for options)
--camera CAMERA index of the MIPI CSI camera to use (e.g. CSI camera 0),
or for V4L2 cameras, the /dev/video device to use.
by default, MIPI CSI camera 0 will be used.
--width WIDTH desired width of camera stream (default is 1280 pixels)
--height HEIGHT desired height of camera stream (default is 720 pixels)

imageNet arguments:
--network NETWORK pre-trained model to load, one of the following:
* alexnet
* googlenet (default)
* googlenet-12
* resnet-18
* resnet-50
* resnet-101
* resnet-152
* vgg-16
* vgg-19
* inception-v4
--model MODEL path to custom model to load (caffemodel, uff, or onnx)
--prototxt PROTOTXT path to custom prototxt to load (for .caffemodel only)
--labels LABELS path to text file containing the labels for each class
--input_blob INPUT name of the input layer (default is 'data')
--output_blob OUTPUT name of the output layer (default is 'prob')
--batch_size BATCH maximum batch size (default is 1)
--profile enable layer profiling in TensorRT

camera type

  • MIPI CSI cameras are used by specifying the sensor index (0 or 1, etc.)
  • V4L2 USB cameras are used by specifying their /dev/video node (/dev/video0, /dev/video1, etc.)

    The default is to use MIPI CSI sensor 0 (--camera=0); see the Python sketch below.
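
A minimal Python sketch of the same camera selection, following the bundled imagenet-camera.py (resolution values are examples):

import jetson.utils

# MIPI CSI sensor 0; pass "/dev/video0" instead for a V4L2 USB camera
camera = jetson.utils.gstCamera(1280, 720, "0")

img, width, height = camera.CaptureRGBA()
print("captured a {}x{} frame".format(width, height))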

Query the available formats with the following commands:

$ sudo apt-get install -y v4l-utils
$ v4l2-ctl --list-formats-ext

ioctl: VIDIOC_ENUM_FMT
Index : 0
Type : Video Capture
Pixel Format: 'MJPG' (compressed)
Name : Motion-JPEG
Size: Discrete 1920x1080
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 160x120
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 176x144
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 320x240
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 352x288
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 640x360
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 640x480
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 1280x720
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 1280x1024
Interval: Discrete 0.033s (30.000 fps)

Index : 1
Type : Video Capture
Pixel Format: 'YUYV'
Name : YUYV 4:2:2
Size: Discrete 1920x1080
Interval: Discrete 0.200s (5.000 fps)
Size: Discrete 160x120
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 176x144
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 320x240
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 352x288
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 640x360
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 640x480
Interval: Discrete 0.033s (30.000 fps)
Size: Discrete 1280x720
Interval: Discrete 0.100s (10.000 fps)
Size: Discrete 1280x1024
Interval: Discrete 0.200s (5.000 fps)

run demo

$ ./imagenet-camera --network=resnet-18  --camera=0 --width=640 --height=480

nvpmodel

# /etc/nvpmodel.conf

sudo nvpmodel -q --verbose # query the current mode
sudo nvpmodel -p --verbose # print all supported modes and their configurations

sudo nvpmodel -m 0 # switch to maximum performance; all CPU cores come online, but their clocks are not yet maxed
sudo ~/jetson_clocks.sh # raise clocks to the maximum frequency

Use DeepStream On Jetson Nano

install DeepStream SDK

  • DeepStream SDK 4.0.1

    DeepStream SDK 4.0.1 requires the installation of JetPack 4.2.2.
    download deepstream_sdk_v4.0.1_jetson.tbz2 from here

  • DeepStream SDK 4.0.2

    DeepStream SDK 4.0.2 requires the installation of JetPack 4.3.
    download deepstream_sdk_v4.0.2_jetson.tbz2 or deepstream-4.0_4.0.2-1_arm64.deb from here

# (1) install prerequisite packages for installing the DeepStream SDK 
sudo apt install \
libssl1.0.0 \
libgstreamer1.0-0 \
gstreamer1.0-tools \
gstreamer1.0-plugins-good \
gstreamer1.0-plugins-bad \
gstreamer1.0-plugins-ugly \
gstreamer1.0-libav \
libgstrtspserver-1.0-0 \
libjansson4=2.11-1

sudo apt-get install librdkafka1=0.11.3-1build1

# (1) install deepstream sdk from tar file
tar -xpvf deepstream_sdk_v4.0.2_jetson.tbz2
cd deepstream_sdk_v4.0.2_jetson
sudo tar -xvpf binaries.tbz2 -C /
sudo ./install.sh
sudo ldconfig

# (2) or install deepstream sdk from deb
sudo apt-get install ./deepstream-4.0_4.0.2-1_arm64.deb

## NOTE: sources and samples folders will be found in /opt/nvidia/deepstream/deepstream-4.0


# To boost the clocks
# After you have installed DeepStream SDK,
# run these commands on the Jetson device to boost the clocks:

sudo nvpmodel -m 0
sudo jetson_clocks

running deepstream-app

$ deepstream-app --help

Usage:
deepstream-app [OPTION…] Nvidia DeepStream Demo

Help Options:
-h, --help Show help options
--help-all Show all help options
--help-gst Show GStreamer Options

Application Options:
-v, --version Print DeepStreamSDK version
-t, --tiledtext Display Bounding box labels in tiled mode
--version-all Print DeepStreamSDK and dependencies version
-c, --cfg-file Set the config file
-i, --input-file Set the input file

deepstream-app -c

export GST_PLUGIN_PATH="/usr/lib/aarch64-linux-gnu/gstreamer-1.0/"

/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1$ deepstream-test1-app ~/video/pengpeng.avi 


cd /opt/nvidia/deepstream/deepstream-4.0/samples/configs/deepstream-app
deepstream-app -c config_infer_primary_nano.txt

# error occurs
** ERROR: <create_multi_source_bin:682>: Failed to create element 'src_bin_muxer'
** ERROR: <create_multi_source_bin:745>: create_multi_source_bin failed
** ERROR: <create_pipeline:1045>: create_pipeline failed
** ERROR: <main:632>: Failed to create pipeline
Quitting
App run failed

# solutions
rm ~/.cache/gstreamer-1.0/registry.aarch64.bin

export DISPLAY=:1
deepstream-app -c config_infer_primary_nano.txt

(deepstream-app:16051): GStreamer-CRITICAL **: 16:31:26.057: gst_element_get_static_pad: assertion 'GST_IS_ELEMENT (element)' failed
Segmentation fault (core dumped)


History

  • 2019/12/09: created.

Guide

mylinks:

  - nickname: 官方博客
    avatar: https://kezunlin.me/images/kezunlin_avatar.jpg
    site: https://kezunlin.me
    info: C++ && Python. CV && DL.

  - nickname: 闪烁之狐
    avatar: https://blinkfox.github.io/medias/logo.png
    site: https://blinkfox.github.io/
    info: blinkfox

output.json

[{
    "avatar": "https://kezunlin.me/images/kezunlin_avatar.jpg",
    "name": "官方博客",
    "introduction": "C++ && Python. CV && DL.",
    "url": "https://kezunlin.me",
    "title": "前去学习"
}, {
    "avatar": "https://blinkfox.github.io/medias/logo.png",
    "name": "闪烁之狐",
    "introduction": "blinkfox",
    "url": "https://blinkfox.github.io/",
    "title": "前去学习"
}]

code

import yaml
import json

result_list = []
with open('./links.yaml') as f:
    obj = yaml.safe_load(f)  # dict
    #print(obj["mylinks"])

    for item in obj["mylinks"]:
        if 0:  # debug prints
            print(item["nickname"])
            print(item["avatar"])
            print(item["site"])
            print(item["info"])
        new_item = {
            "avatar": item["avatar"],
            "name": item["nickname"],
            "introduction": item["info"],
            "url": item["site"],
            "title": "前去学习"  # link button text ("go and learn")
        }
        result_list.append(new_item)

str_pretty_result = json.dumps(
    result_list, indent=4,
    sort_keys=True, ensure_ascii=False)

with open("friends.json", "w") as f:
    f.write(str_pretty_result)


History

  • 2019/11/29: created.

Guide

flip

import cv2
image = cv2.imread("demo.jpg")

# flipped horizontally
h_flip = cv2.flip(image, 1)
cv2.imwrite("demo-h.jpg", h_flip)

# flipped vertically
v_flip = cv2.flip(image, 0)
cv2.imwrite("demo-v.jpg", v_flip)

# flipped both horizontally and vertically
hv_flip = cv2.flip(image, -1)
cv2.imwrite("demo-hv.jpg", hv_flip)

rotate

def rotate_anti_90(image):
    image = cv2.transpose(image)
    image = cv2.flip(image, 0)
    return image

def rotate_anti_180(image):
    image = cv2.flip(image, 0)
    image = cv2.flip(image, 1)
    return image

def rotate_anti_270(image):
    image = cv2.transpose(image)
    image = cv2.flip(image, 1)
    return image

def rotate(image, angle, center=None, scale=1.0):
    # rotate by angle (degrees, counter-clockwise) around center
    (h, w) = image.shape[:2]  # hwc
    if center is None:
        center = (w / 2., h / 2.)

    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    return rotated

compression

cv2.imwrite(full_image_path, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) # jpg quality 100: best quality, least compression
# [int(cv2.IMWRITE_PNG_COMPRESSION), 9]  # png compression level: 0-9
# [int(cv2.IMWRITE_JPEG_QUALITY), 100]   # jpg quality: 0-100

get video info

import datetime
import cv2
from moviepy.editor import VideoFileClip
import numpy as np

def get_video_info(video_path):
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return

    frame_number = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    cap.release()  # release video capture

    print("fps = ", fps)
    print("frame_number = ", frame_number)
    size = (w, h)
    print("size = ", size)

    # frame number MAY BE WRONG! so the video duration may also be wrong!
    duration = int(frame_number / fps)
    print("seconds =", duration)
    video_time = str(datetime.timedelta(seconds=duration))
    print("video_time =", video_time)

    print("-----------------------using VideoFileClip------------------")
    clip = VideoFileClip(video_path)
    duration = clip.duration
    print("video duration is " + str(duration) + " seconds")
    video_time = str(datetime.timedelta(seconds=int(duration)))
    print("video_time =", video_time)

def clip_video():
    clip = VideoFileClip("1.mp4")
    starting_point = 120  # start at the second minute
    end_point = 420       # record for 300 seconds (120+300)
    subclip = clip.subclip(starting_point, end_point)
    subclip.write_videofile("/path/to/new/video.mp4")

numpy argmax

numpy argmax for 2-dim and 3-dim

import numpy as np

# for 2-dim
array = np.array([
    [1, 2, 3],
    [4, 5, 6],
    [9, 8, 7],
    [1, 2, 3],
    [10, 1, 2]
])
print("array.shape=", array.shape)
result1 = array.argmax(axis=0)  # hw: axis=0 ===> along h, shape=(w,), values in [0,1,2,3,4]
result2 = array.argmax(axis=1)  # hw: axis=1 ===> along w, shape=(h,), values in [0,1,2]
print(result1)
print(result1.shape)
print(result2)
print(result2.shape)

output

('array.shape=', (5, 3))
[4 2 2]
(3,)
[2 2 0 2 0]
(5,)
# for 3-dim
array2 = np.array([
    [3, 2, 1],
    [6, 5, 4],
    [7, 8, 9],
    [1, 2, 3],
    [1, 1, 10]
])
image = np.array([
    array, array2
])
print("image.shape=", image.shape)
print(image)

out = image.argmax(axis=0)  # chw: axis=0 ===> along c, shape=(h,w), values in [0,1]
print(out)
print(out.shape)
print(out.dtype)

output

('image.shape=', (2, 5, 3))
[[[ 1  2  3]
  [ 4  5  6]
  [ 9  8  7]
  [ 1  2  3]
  [10  1  2]]

 [[ 3  2  1]
  [ 6  5  4]
  [ 7  8  9]
  [ 1  2  3]
  [ 1  1 10]]]
[[1 0 0]
 [1 0 0]
 [0 0 1]
 [0 0 0]
 [0 0 1]]
(5, 3)
int64
# for view segmentation output
output = np.load('output.npy')
print("output.shape=",output.shape) # 1, 2, 512, 512
print("output.dtype=",output.dtype)

image = output[0]
print("image.shape=",image.shape) # 2, 512, 512
print("image.dtype=",image.dtype)

out = image.argmax(axis=0) # chw axis=0 ===> c
print("out.shape=",out.shape) # 512, 512
print("out.dtype=",out.dtype) # int64

print(out.min())
print(out.max())

output

('output.shape=', (1, 2, 512, 512))
('output.dtype=', dtype('float32'))
('image.shape=', (2, 512, 512))
('image.dtype=', dtype('float32'))
('out.shape=', (512, 512))
('out.dtype=', dtype('int64'))
0
1

pandas quantile

basic

"""
for array of length n:
1. pos = 1 + (n-1)*p
2. get integer part and fraction part of pos: i, f
3. return a[i-1] + (a[i]-a[i-1])*f
"""
import math
def quantile_p(data, p, method=1):
data.sort()
if method == 2:
pos = 1 + (len(data)-1)*p
else:
pos = (len(data) + 1)*p
i = int(math.modf(pos)[1])
f = pos - i
Q = data[i-1] + (data[i]-data[i-1])*f
Q1 = quantile_p(data, 0.25)
Q2 = quantile_p(data, 0.5)
Q3 = quantile_p(data, 0.75)
IQR = Q3 - Q1
return Q1, Q2, Q3, IQR

quantile

import pandas as pd
import numpy as np

df = pd.Series(np.array([6, 47, 49, 15, 42, 41, 7, 39, 43, 40, 36]))

print(df)
print('Q1:', df.quantile(.25))
print('Q2:', df.quantile(.5))
print('Q3:', df.quantile(.75))

pandas uses method 2: pos = 1 + (n-1)*p, i.e. linear interpolation between the two nearest order statistics.
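
As a quick sanity check (a small sketch, not from the original post), the method-2 formula reproduces the pandas results for the series above:

# sorted data: [6, 7, 15, 36, 39, 40, 41, 42, 43, 47, 49], n = 11
data = sorted([6, 47, 49, 15, 42, 41, 7, 39, 43, 40, 36])

# Q1: pos = 1 + 10*0.25 = 3.5 -> a[3] + (a[4]-a[3])*0.5 = 15 + 21*0.5 = 25.5
# Q2: pos = 1 + 10*0.50 = 6.0 -> a[6] = 40
# Q3: pos = 1 + 10*0.75 = 8.5 -> a[8] + (a[9]-a[8])*0.5 = 42 + 1*0.5 = 42.5
for p in (0.25, 0.5, 0.75):
    pos = 1 + (len(data) - 1) * p
    i, f = int(pos), pos - int(pos)
    print(p, data[i-1] + (data[i] - data[i-1]) * f)  # matches df.quantile(p)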

image to/from base64 string

import base64

def get_base64_str_from_file(filepath):
    with open(filepath, "rb") as f:
        bytes_content = f.read()  # bytes
    bytes_64 = base64.b64encode(bytes_content)
    return bytes_64.decode('utf-8')  # bytes ---> str (drop the `b` prefix)

def save_base64_str_to_file(str_base64, to_file):
    bytes_64 = str_base64.encode('utf-8')  # str ---> bytes (add the `b` prefix)
    bytes_content = base64.decodebytes(bytes_64)  # bytes
    with open(to_file, "wb") as f:
        f.write(bytes_content)

def test_base64():
    # image to/from base64
    image_path = "images/1.jpg"
    str_base64 = get_base64_str_from_file(image_path)
    save_base64_str_to_file(str_base64, "images/2.jpg")
    print("OK")

if __name__ == "__main__":
    test_base64()

output

OK

normal string to/from base64 string

import base64

def str_to_base64(normal_str):
    bytes_str = normal_str.encode('utf-8')  # str ===> bytes
    bytes_64 = base64.b64encode(bytes_str)  # bytes ===> bytes
    return bytes_64.decode('utf-8')  # bytes ===> str

def base64_to_str(base64_str):
    bytes_64 = base64_str.encode('utf-8')  # str ===> bytes
    bytes_content = base64.decodebytes(bytes_64)  # bytes ===> bytes
    return bytes_content.decode('utf-8')  # bytes ===> str

def test_base64():
    normal_str = "Hello World !"
    str_base64 = str_to_base64(normal_str)
    normal_str2 = base64_to_str(str_base64)
    print("normal_str = ", normal_str)
    print("str_base64 = ", str_base64)
    print("normal_str2 = ", normal_str2)

if __name__ == "__main__":
    test_base64()

output

normal_str =  Hello World !
str_base64 =  SGVsbG8gV29ybGQgIQ==
normal_str2 =  Hello World !

json loads and dumps

import json

str_text = '{"status":0,"msg":"成功"}'
dict_json = json.loads(str_text)

print(type(dict_json))  # dict
print(dict_json)

str_pretty_result = json.dumps(
    dict_json, indent=4,
    sort_keys=True, ensure_ascii=False)

print(type(str_pretty_result))  # str
print(str_pretty_result)

output

<class 'dict'>
{'status': 0, 'msg': '成功'}
<class 'str'>
{
    "msg": "成功",
    "status": 0
}

json.loads converts a str to a dict; json.dumps converts a dict back to a str.

datetime

import datetime

def demo_string_datetime():
    # date_time = now.strftime("%m/%d/%Y, %H:%M:%S")

    datetime_str = '09/19/18 13:55:26'
    datetime_object = datetime.datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S')

    print(type(datetime_object))
    print(datetime_object)  # printed in the default format

    datetime_str = '19910403'
    datetime_object = datetime.datetime.strptime(datetime_str, '%Y%m%d')

    print(type(datetime_object))
    print(datetime_object)  # printed in the default format

    datetime_result_str = datetime_object.strftime('%Y%m%d')
    print(datetime_result_str)

simplekml

import simplekml
kml = simplekml.Kml()
kml.newpoint(name="point a", coords=[(18.432314,-33.988862)]) # lon, lat, optional height
kml.newpoint(name="point b", coords=[(28.432314,-43.988862)]) # lon, lat, optional height
kml.save("1.kml")

1.kml

<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2">
    <Document id="1">
        <Placemark id="3">
            <name>point a</name>
            <Point id="2">
                <coordinates>18.432314,-33.988862,0.0</coordinates>
            </Point>
        </Placemark>
        <Placemark id="5">
            <name>point b</name>
            <Point id="4">
                <coordinates>28.432314,-43.988862,0.0</coordinates>
            </Point>
        </Placemark>
    </Document>
</kml>

python requests

install

pip install requests
conda install requests

usage

>>> requests.get('https://httpbin.org/get')
>>> requests.post('https://httpbin.org/post', data={'key':'value'})
>>> requests.put('https://httpbin.org/put', data={'key':'value'})
>>> requests.delete('https://httpbin.org/delete')
>>> requests.head('https://httpbin.org/get')
>>> requests.patch('https://httpbin.org/patch', data={'key':'value'})
>>> requests.options('https://httpbin.org/get')

code example

import requests
import json

data = {"name": "admin",
        "password": "21232f297a57a5a743894a0e4a801fc3"}

# post with a manually serialized json body
r = requests.post(
    "http://127.0.0.1/api/login",
    headers={"Accept": "application/json",
             "Content-Type": "application/json"},
    data=json.dumps(data)
)
print(r.text)

# or let requests serialize the body via the json= parameter
r = requests.post(
    "http://127.0.0.1/api/login",
    headers={"Accept": "application/json",
             "Content-Type": "application/json"},
    json=data
)
print(r.text)


session_id = "[email protected]"
myheaders = {"Accept": "application/json",
             "Content-Type": "application/json",
             "session_id": session_id}
r = requests.get(
    "http://127.0.0.1/api/book",
    headers=myheaders
)
print(r.text)

requests-html

requests-html: HTML parsing for humans

install

pip install requests-html

usage

>>> from requests_html import HTMLSession
>>> session = HTMLSession()
>>> r = session.get('https://python.org/')
>>> r.text
>>> r.html.find('title', first=True).text

selenium chromedriver

versions

  • Selenium – version 3.11.0
  • Chrome Browser – version 77
  • ChromeDriver – version 77

steps

  1. download and install chrome browser 77
  2. download ChromeDriver for Chrome Browser 77
  3. install and check version

commands

sudo cp chromedriver /usr/local/bin
chromedriver -v
ChromeDriver 77.0.3865.40
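
A minimal usage sketch, assuming chromedriver now sits on the PATH (the URL is only an example):

from selenium import webdriver

driver = webdriver.Chrome()  # picks up chromedriver from /usr/local/bin
driver.get("https://python.org")
print(driver.title)
driver.quit()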

smote

using SMOTE to over-sample imbalanced datasets; see the sketch after the install commands below

install

pip install smote_variants
pip install imbalanced_databases
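
A minimal over-sampling sketch along the lines of the smote_variants README (the load_glass0 loader name is an assumption about the imbalanced_databases package):

import smote_variants as sv
import imbalanced_databases as imbd

dataset = imbd.load_glass0()  # one of the bundled imbalanced datasets
X, y = dataset['data'], dataset['target']

oversampler = sv.SMOTE()  # plain SMOTE; the package offers many variants
X_samp, y_samp = oversampler.sample(X, y)  # the over-sampled dataset
print(X.shape, '->', X_samp.shape)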

class member vs instance member

#!/usr/bin/python
# -*- coding: UTF-8 -*-

class Info(object):
    rects = []   # class attribute (mutable list): all instances share one copy
    counter = 0  # class attribute (int): `+=` rebinds it as an instance attribute, so each instance ends up with its own copy

    def __init__(self):
        self.new_rects = []
        self.new_counter = 0

def func():
    it = Info()
    return it

def test1():
    info = func()

    info.rects.extend([1, 2, 3, 4, 5])
    info.counter += 1
    info.new_rects.extend([1, 2, 3, 4, 5])
    info.new_counter += 1

    print("rect size", len(info.rects), len(info.new_rects))
    print("counter", info.counter, info.new_counter)
    print("--------------------------")

    info2 = func()
    info2.rects.extend([1, 2, 3, 4, 5])
    info2.counter += 1
    info2.new_rects.extend([1, 2, 3, 4, 5])
    info2.new_counter += 1

    print("rect size", len(info2.rects), len(info2.new_rects))
    print("counter", info2.counter, info2.new_counter)
    print("--------------------------")


if __name__ == "__main__":
    test1()

"""
rect size 5 5
counter 1 1
--------------------------
rect size 10 5
counter 1 1
--------------------------
"""

pyyaml

install

pip install pyyaml
pip freeze > requirements.txt

usage
cfg.yaml

---
# cfg.yaml
debug: true # debug or not (default: false)
input_dir: "./input/" # input dir
output_dir: "./output/" # output dir

code

import os
import yaml

def load_cfg(cfg_path='./cfg.yaml'):
    cfg = None
    if os.path.exists(cfg_path):
        with open(cfg_path) as f:
            cfg = yaml.safe_load(f)
        input_dir = cfg.get("input_dir")
    else:
        print("{} does not exist".format(cfg_path))
    return cfg

cupy

CuPy is a NumPy-like API accelerated with CUDA: in short, NumPy on the GPU.

install

(For CUDA 8.0)
% pip install cupy-cuda80

(For CUDA 9.0)
% pip install cupy-cuda90

(For CUDA 9.1)
% pip install cupy-cuda91

(For CUDA 9.2)
% pip install cupy-cuda92

(For CUDA 10.0)
% pip install cupy-cuda100

(For CUDA 10.1)
% pip install cupy-cuda101

(Install CuPy from source)
% pip install cupy

usage

>>> import cupy as cp
>>> x = cp.arange(6).reshape(2, 3).astype('f')
>>> x
array([[ 0.,  1.,  2.],
       [ 3.,  4.,  5.]], dtype=float32)
>>> x.sum(axis=1)
array([  3.,  12.], dtype=float32)


>>> x = cp.arange(6, dtype='f').reshape(2, 3)
>>> y = cp.arange(3, dtype='f')
>>> kernel = cp.ElementwiseKernel(
...     'float32 x, float32 y', 'float32 z',
...     '''if (x - 2 > y) {
...         z = x * y;
...     } else {
...         z = x + y;
...     }''', 'my_kernel')
>>> kernel(x, y)
array([[  0.,   2.,   4.],
       [  0.,   4.,  10.]], dtype=float32)

SORT

SORT: A Simple, Online and Realtime Tracker based on the Kalman filter

code

from sort import *

#create instance of SORT
mot_tracker = Sort()

# get detections
...

# update SORT
track_bbs_ids = mot_tracker.update(detections)

# track_bbs_ids is a np array where each row contains a valid bounding box and track_id (last column)
...
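
To make the elided parts concrete, a hedged sketch with dummy detections: update() takes one row per detection, [x1, y1, x2, y2, score], and returns one row per tracked object, [x1, y1, x2, y2, track_id].

import numpy as np
from sort import Sort  # assumes sort.py from the SORT repo is on PYTHONPATH

mot_tracker = Sort()

# dummy detections for a single frame: [x1, y1, x2, y2, score]
detections = np.array([
    [100, 80, 180, 220, 0.9],
    [300, 60, 380, 200, 0.8],
])
track_bbs_ids = mot_tracker.update(detections)
print(track_bbs_ids)  # one row per track: [x1, y1, x2, y2, track_id]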


History

  • 2019/11/08: created.

loop 2d array

Why does the order of the loops affect performance when iterating over a 2D array?

When fetching a given element of a matrix from memory, the elements near it (the next 15 ints) are fetched as well and stored in a cache line (64 bytes = 16 ints).

For simplicity, assume the cache comprises a single cache line which can hold 2 matrix elements, and that when a given element is fetched from memory, the next one is fetched too. Say we want to take the sum over all elements of the 2x2 matrix M = [[1, 2], [3, 4]], stored in row-major order:

Exploiting the ordering (e.g. varying the column index in the inner loop, as in row-major C++):

M[0][0] (memory) + M[0][1] (cached) + M[1][0] (memory) + M[1][1] (cached)
= 1 + 2 + 3 + 4
--> 2 cache hits, 2 memory accesses

Not exploiting the ordering (e.g. varying the row index in the inner loop):

M[0][0] (memory) + M[1][0] (memory) + M[0][1] (memory) + M[1][1] (memory)
= 1 + 3 + 2 + 4
--> 0 cache hits, 4 memory accesses
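
The effect is easiest to see in C/C++, but even from Python it can be observed by summing a large row-major NumPy array row-by-row (contiguous reads) versus column-by-column (strided reads); a rough timing sketch, with the size chosen arbitrarily:

import time
import numpy as np

n = 5000
m = np.random.rand(n, n).astype(np.float32)  # C order: each row is contiguous

t0 = time.perf_counter()
row_total = sum(float(m[i, :].sum()) for i in range(n))  # sequential access
row_time = time.perf_counter() - t0

t0 = time.perf_counter()
col_total = sum(float(m[:, j].sum()) for j in range(n))  # strided access, poor cache reuse
col_time = time.perf_counter() - t0

print("row-wise %.3fs, col-wise %.3fs" % (row_time, col_time))  # col-wise is typically slower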


History

  • 2019/11/08: created.

Guide

use the article publisher artipub to publish your articles to multiple platforms:

  • csdn
  • cnblog
  • zhihu
  • jianshu
  • segmentfault
  • toutiao

install

sudo apt-get -y install mongodb
sudo npm install -g artipub --registry=https://registry.npm.taobao.org

usage

start server

artipub start

By default, artipub connects to MongoDB at localhost:27017/artipub.

check mongodb

$ mongo
> show dbs
admin (empty)
artipub 0.078GB
db (empty)
local 0.078GB

Now open http://localhost:8000 in a browser.


History

  • 2019/11/08: created.

Code Example

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <iostream>
using namespace std;

class Integer
{
public:
    Integer(int value): v(value)
    {
        cout << "default constructor" << endl;
    }
    Integer(const Integer &other)
    {
        cout << "copy constructor" << endl;
        v = other.v;
    }
    Integer &operator=(const Integer &other)
    {
        cout << "copy assignment" << endl;
        v = other.v;
        return *this;
    }

    // ++i: pre-increment, add 1 first, then return the new value
    Integer &operator++()
    {
        cout << "Integer::operator++()" << endl;
        v++;
        return *this;
    }

    // i++: post-increment, save the old value, add 1, then return the old value
    Integer operator++(int)
    {
        cout << "Integer::operator++(int)" << endl;
        Integer old = *this;
        v++;
        return old;
    }

    void output()
    {
        cout << "value " << v << endl;
    }
private:
    int v;
};

void test_case()
{
    Integer obj(10);
    Integer obj2 = obj;
    Integer obj3(0);
    obj3 = obj;
    cout << "--------------" << endl;
    cout << "++i" << endl;
    ++obj;
    obj.output();
    cout << "i++" << endl;
    obj++;
    obj.output();
}

int main()
{
    test_case();
    return 0;
}

output

default constructor
copy constructor
default constructor
copy assignment
--------------
++i
Integer::operator++()
value 11
i++
Integer::operator++(int)
copy constructor
value 12


History

  • 2019/11/08: created.