
Series

Guide

apt-get

sudo apt-get install libgtest-dev
cd /usr/src/gtest # source files only; no prebuilt libraries

compile

# either clone the latest source
git clone https://github.com/google/googletest.git
# or download a release tarball
wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz

cd googletest
mkdir build
cd build
cmake-gui ..
make -j8
sudo make install

options:

BUILD_SHARED_LIBS=ON

install to /usr/local/

  • for the static library, use libgtest.a
  • for the shared library, use libgtest.so

gtest-config.cmake

# - Try to find GTEST
#
# The following variables are optionally searched for defaults
#  GTEST_ROOT_DIR: Base directory where all GTEST components are found
#
# The following are set after configuration is done:
#  GTEST_FOUND
#  GTEST_INCLUDE_DIRS
#  GTEST_LIBRARIES
#  GTEST_LIBRARY_DIRS

include(FindPackageHandleStandardArgs)

set(GTEST_ROOT_DIR "" CACHE PATH "Folder contains Google gtest")

if(WIN32)
    find_path(GTEST_INCLUDE_DIR gtest/gtest.h
        PATHS ${GTEST_ROOT_DIR})
else()
    find_path(GTEST_INCLUDE_DIR gtest/gtest.h
        PATHS ${GTEST_ROOT_DIR})
endif()

if(MSVC)
    find_library(GTEST_LIBRARY_RELEASE gtest
        PATHS ${GTEST_ROOT_DIR}
        PATH_SUFFIXES Release)
    find_library(GTEST_MAIN_LIBRARY_RELEASE gtest_main
        PATHS ${GTEST_ROOT_DIR}
        PATH_SUFFIXES Release)

    find_library(GTEST_LIBRARY_DEBUG gtest
        PATHS ${GTEST_ROOT_DIR}
        PATH_SUFFIXES Debug)
    find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_main
        PATHS ${GTEST_ROOT_DIR}
        PATH_SUFFIXES Debug)

    set(GTEST_LIBRARY optimized ${GTEST_LIBRARY_RELEASE} ${GTEST_MAIN_LIBRARY_RELEASE} debug ${GTEST_LIBRARY_DEBUG} ${GTEST_MAIN_LIBRARY_DEBUG})
else()
    find_library(GTEST_LIBRARY gtest
        PATHS ${GTEST_ROOT_DIR}
        PATH_SUFFIXES lib lib64)

    find_library(GTEST_MAIN_LIBRARY gtest_main
        PATHS ${GTEST_ROOT_DIR}
        PATH_SUFFIXES lib lib64)
endif()

find_package_handle_standard_args(GTEST DEFAULT_MSG GTEST_INCLUDE_DIR GTEST_LIBRARY GTEST_MAIN_LIBRARY)

if(GTEST_FOUND)
    set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIR})
    set(GTEST_LIBRARIES ${GTEST_LIBRARY} ${GTEST_MAIN_LIBRARY})
    message(STATUS "Found gtest (include: ${GTEST_INCLUDE_DIRS}, library: ${GTEST_LIBRARIES})")
    mark_as_advanced(GTEST_ROOT_DIR GTEST_LIBRARY_RELEASE GTEST_LIBRARY_DEBUG
        GTEST_MAIN_LIBRARY_RELEASE GTEST_MAIN_LIBRARY_DEBUG
        GTEST_LIBRARY GTEST_MAIN_LIBRARY GTEST_INCLUDE_DIR)
endif()
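To pick this module up, place it on the CMake module path and call find_package. A usage sketch, assuming the file is saved as cmake/FindGTEST.cmake inside your project (in module mode, find_package(GTEST) looks for a file named FindGTEST.cmake):

list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") # folder holding FindGTEST.cmake (assumed location)
find_package(GTEST REQUIRED)
include_directories(${GTEST_INCLUDE_DIRS})
add_executable(runTests tests.cpp)
target_link_libraries(runTests ${GTEST_LIBRARIES} pthread)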

Example

code

whattotest.cpp

#include <math.h>

double squareRoot(const double a) {
    double b = sqrt(a);
    if (b != b) { // NaN check: sqrt of a negative number returns NaN
        return -1.0;
    } else {
        return b;
    }
}

tests.cpp

#include "whattotest.cpp"
#include <gtest/gtest.h>

TEST(SquareRootTest, PositiveNos) {
ASSERT_EQ(6, squareRoot(36.0));
ASSERT_EQ(18.0, squareRoot(324.0));
ASSERT_EQ(25.4, squareRoot(645.16));
ASSERT_EQ(0, squareRoot(0.0));
}

TEST(SquareRootTest, NegativeNos) {
ASSERT_EQ(-1.0, squareRoot(-15.0));
ASSERT_EQ(-1.0, squareRoot(-0.2));
}

int main(int argc, char **argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
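Note that ASSERT_EQ compares values exactly; the floating-point cases above happen to pass, but for general floating-point results gtest provides dedicated assertions. A small sketch:

// ASSERT_DOUBLE_EQ compares two doubles to within 4 ULPs;
// ASSERT_NEAR takes an explicit absolute tolerance.
TEST(SquareRootTest, FloatingPoint) {
    ASSERT_DOUBLE_EQ(25.4, squareRoot(645.16));
    ASSERT_NEAR(1.41421, squareRoot(2.0), 1e-5);
}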

CMakeLists.txt

cmake_minimum_required(VERSION 2.6)

if(MSVC)
    SET(GTEST_ROOT "C:/Program Files/gtest")
else()
    # BOOST_THREAD_LIBRARY /usr/lib/x86_64-linux-gnu/libpthread.so
    MESSAGE( [Main] " BOOST_THREAD_LIBRARY = ${BOOST_THREAD_LIBRARY}")
endif(MSVC)

find_package(GTest REQUIRED) # GTest 1.8.0
include_directories(${GTEST_INCLUDE_DIRS})

# Link runTests with what we want to test and the GTest and pthread library
add_executable(runTests tests.cpp)
target_link_libraries(runTests ${GTEST_LIBRARIES} ${BOOST_THREAD_LIBRARY})
#target_link_libraries(runTests gtest pthread)

compile

mkdir build && cd build && cmake-gui ..
make -j8

run

./runTests

Use -lgtest as a linker flag and, optionally, if you did not write your own test main routine, the explicit -lgtest_main flag.

gtest uses pthread, so we need -lpthread as well.

-lgtest -lgtest_main -lpthread
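For example, since tests.cpp above already defines its own main, a direct compile without CMake might look like this (a sketch, assuming gtest headers and libraries were installed to the default /usr/local prefix):

g++ tests.cpp -o runTests -lgtest -lpthread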

Reference

History

  • 20180118: created.
  • 20180122: add shared library part.

Guide

format flags

second millisecond microsecond nanosecond

Format Flags

  • %f
    Fractional seconds are always used, even when their value is zero
    “13:15:16.000000”

  • %F *
    Fractional seconds are used only when their value is not zero.
    “13:15:16”
    “05:04:03.001234”
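A minimal sketch showing the two flags side by side with boost::posix_time::time_facet (the same imbue pattern used in date_time.cpp below; expected outputs taken from the table above):

#include <iostream>
#include <sstream>
#include <boost/date_time/posix_time/posix_time.hpp>

int main()
{
    using namespace boost::posix_time;
    ptime t(boost::gregorian::date(2018, 1, 18), time_duration(13, 15, 16));

    std::stringstream always, nonzero;
    // %f: fractional seconds are always printed, even when zero
    always.imbue(std::locale(always.getloc(), new time_facet("%H:%M:%S%f")));
    // %F: fractional seconds are printed only when non-zero
    nonzero.imbue(std::locale(nonzero.getloc(), new time_facet("%H:%M:%S%F")));

    always << t;   // 13:15:16.000000
    nonzero << t;  // 13:15:16
    std::cout << always.str() << "\n" << nonzero.str() << std::endl;
    return 0;
}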

to string

The Boost.Date_Time library provides the following ptime to std::string conversions within the boost::posix_time namespace:

  • std::string to_simple_string(ptime) returns a string in the form of YYYY-mmm-DD HH:MM:SS.fffffffff format where mmm is the three character month name.
  • std::string to_iso_string(ptime) returns a string in the form of YYYYMMDDTHHMMSS,fffffffff where T is the date-time separator.
  • std::string to_iso_extended_string(ptime) returns a string in the form of YYYY-MM-DDTHH:MM:SS,fffffffff where T is the date-time separator.

date_time.cpp

#include <iostream>
#include <sstream>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/date_time/gregorian/gregorian.hpp>
using namespace std;
using namespace boost::posix_time;
using namespace boost::gregorian;

/*
second millisecond microsecond nanosecond

Format Flags
%f
Fractional seconds are always used, even when their value is zero
"13:15:16.000000"

%F *
Fractional seconds are used only when their value is not zero.
"13:15:16"
"05:04:03.001234"
*/

std::string ptime_2_str_name(boost::posix_time::ptime now)
{
    // https://stackoverflow.com/questions/5018188/how-to-format-a-datetime-to-string-using-boost
    // the locale (and its facet) is static so it can be reused across calls
    std::stringstream ss;
    //static std::locale loc(std::cout.getloc(), new time_facet("%Y%m%d_%H%M%S_%f"));
    static std::locale loc(ss.getloc(), new time_facet("%Y%m%d_%H%M%S_%f"));
    ss.imbue(loc);
    ss << now;
    return ss.str(); // 20180118_111501_208797
}

std::string str_name_2_iso_format(std::string str_name)
{
    /*
    20180118_111501_208797 ===> 20180118T111501.208797 ===> from_iso_string ===> ptime
    */
    //std::cout << "time length: " << str_ptime.length() << endl; // 22
    size_t first_pos = str_name.find_first_of('_');
    size_t second_pos = str_name.find_last_of('_');
    str_name[first_pos] = 'T';
    str_name[second_pos] = '.';
    return str_name;
}

ptime from_name_string(std::string str_ptime)
{
    std::string str_iso_ptime = str_name_2_iso_format(str_ptime);
    return from_iso_string(str_iso_ptime);
}


int main()
{
    ptime p1(date(2001, 1, 1), hours(1));
    ptime p2 = time_from_string("2002-2-2 02:00:00.999888"); // fraction part: 6 digits
    ptime p3 = from_iso_string("20030303T031233.777666"); // fraction part: 6 digits
    ptime p4 = second_clock::local_time(); // second resolution
    ptime p5 = microsec_clock::universal_time(); // UTC time, microsecond resolution
    ptime p6 = microsec_clock::local_time(); // local time, microsecond resolution

    cout << p1 << endl
         << p2 << endl
         << p3 << endl
         << p4 << endl
         << p5 << endl
         << p6 << endl << endl;

    /*
     * date()
     * time_of_day()
     */
    date d = p1.date();
    time_duration td = p1.time_of_day();
    cout << d << ", " << td << endl << endl;

    cout << to_simple_string(p2) << endl // YYYY-mmm-DD HH:MM:SS.ffffff
         << to_iso_string(p2) << endl // YYYYMMDDTHHMMSS.ffffff
         << to_iso_extended_string(p2) << endl; // YYYY-MM-DDTHH:MM:SS.ffffff

    cout << "User defined format time:" << endl;
    std::string str_name = ptime_2_str_name(p6);
    ptime p7 = from_name_string(str_name);
    cout << "                      p6: " << ptime_2_str_name(p6) << endl;
    cout << "p7 from String2Ptime(p6): " << ptime_2_str_name(p7) << endl;

    return 0;
}

CMakeLists.txt

# Specify the minimum version for CMake
cmake_minimum_required(VERSION 2.8)

# Project's name
project(date_time)

# Set the output folder where your program will be created
set(CMAKE_BINARY_DIR ${CMAKE_SOURCE_DIR}/bin)
set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR})
set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR})

#find_package(Boost 1.58 REQUIRED COMPONENTS date_time system filesystem iostreams)
find_package(Boost 1.58 REQUIRED COMPONENTS date_time)
include_directories(${Boost_INCLUDE_DIRS})
link_directories(${Boost_LIBRARY_DIRS})

# The following folder will be included
include_directories("${PROJECT_SOURCE_DIR}")

add_executable(date_time ${PROJECT_SOURCE_DIR}/date_time.cpp)
target_link_libraries(date_time ${Boost_LIBRARIES})

run and output

compile program and run

mkdir build
cd build
cmake ..
make

cd bin
./date_time

output

2001-Jan-01 01:00:00
2002-Feb-02 02:00:00.999888
2003-Mar-03 03:12:33.777666
2018-Jan-18 15:20:47
2018-Jan-18 07:20:47.815415
2018-Jan-18 15:20:47.815419

2001-Jan-01, 01:00:00

2002-Feb-02 02:00:00.999888
20020202T020000.999888
2002-02-02T02:00:00.999888
User defined format time:
                      p6: 20180118_152047_815419
p7 from String2Ptime(p6): 20180118_152047_815419

Reference

History

  • 20180118: created.

Series

Guide

  • qt: 5.7.0
  • qmake: 3.0
  • qtcreator: 3.5.1
  • vtk: 8.1.0 (source)
  • pcl: 1.8.1 (source)

Setup Prerequisites

sudo apt-get update
sudo apt-get install git build-essential linux-libc-dev
sudo apt-get install cmake cmake-gui
sudo apt-get install libusb-dev libudev-dev
sudo apt-get install mpi-default-dev openmpi-bin openmpi-common

sudo apt-get install libpcap-dev
sudo apt-get install libflann1.8 libflann-dev
sudo apt-get install libeigen3-dev
sudo apt-get install libopenni2-dev
sudo apt-get install libqhull7 libqhull-dev

sudo apt-get install freeglut3-dev pkg-config
sudo apt-get install libxmu-dev libxi-dev
sudo apt-get install mono-complete
sudo apt-get install openjdk-8-jdk openjdk-8-jre

metslib

for cmake error:

no metslib found.

fix by

wget https://www.coin-or.org/download/source/metslib/metslib-0.5.3.tgz
tar xzvf metslib-0.5.3.tgz
cd metslib-0.5.3
./configure
make
sudo make install

glxinfo

install

sudo apt-get install mesa-utils
glxinfo

possible error

X Error of failed request: BadRequest (invalid request code or no such operation)
Major opcode of failed request: 154 (GLX)
Minor opcode of failed request: 34 ()
Serial number of failed request: 34
Current serial number in output stream: 33

Fix: make sure the NVIDIA drivers are installed successfully and do not conflict.
Check the display's OpenGL info:

$ glxinfo | grep OpenGL

OpenGL vendor string: NVIDIA Corporation
OpenGL renderer string: GeForce GTX 1060/PCIe/SSE2
OpenGL core profile version string: 4.5.0 NVIDIA 384.90
OpenGL core profile shading language version string: 4.50 NVIDIA
OpenGL core profile context flags: (none)
OpenGL core profile profile mask: core profile
OpenGL core profile extensions:
OpenGL version string: 4.5.0 NVIDIA 384.90
OpenGL shading language version string: 4.50 NVIDIA
OpenGL context flags: (none)
OpenGL profile mask: (none)
OpenGL extensions:
OpenGL ES profile version string: OpenGL ES 3.2 NVIDIA 384.90
OpenGL ES profile shading language version string: OpenGL ES GLSL ES 3.20
OpenGL ES profile extensions:

This means the NVIDIA display driver provides OpenGL 4.5.0.

Or check via nvidia-settings: NVIDIA X Server Settings ---> X Screen 0 ---> OpenGL/GLX Information.

compile

wget https://github.com/PointCloudLibrary/pcl/archive/pcl-1.8.1.tar.gz
tar xzvf pcl-1.8.1.tar.gz

cd pcl-pcl-1.8.1
mkdir build
cd build
cmake-gui ..

with options

 QT_USE_FILE /home/kezunlin/program/pcl-1.8.1/build/use-qt5.cmake
 
 VTK_DIR  /usr/local/lib/cmake/vtk-8.1
 
 CMAKE_BUILD_TYPE       Release
 CMAKE_CONFIGURATION_TYPES Release
 CMAKE_INSTALL_PREFIX  /usr/local
 
 PCL_SHARED_LIBS       ON
 PCL_QT_VERSION        5
 PCL_ENABLE_SSE        ON
 
 Build_visualization ON
 Build_apps ON
 Build_examples OFF # error may occur

Using CPU native flags for SSE optimization: -march=native

make and install

# it may take several minutes, wait ...
make -j8
sudo make -j8 install

cmake installs to /usr/local/bin, /usr/local/lib, and /usr/local/include/pcl-1.8;
PCL_DIR will be /usr/local/share/pcl-1.8
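A downstream CMakeLists.txt can then consume this install through the generated package config (a sketch; setting PCL_DIR by hand is only needed when CMake cannot locate the config automatically):

# set(PCL_DIR "/usr/local/share/pcl-1.8") # optional hint; usually found automatically
find_package(PCL 1.8.1 REQUIRED)
include_directories(${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})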

error when building examples, for example:

/home/kezunlin/program/pcl-1.8.1/examples/segmentation/example_cpc_segmentation.cpp:493:17: error: ‘class vtkUnsignedCharArray’ has no member named ‘InsertNextTupleValue’
         colors->InsertNextTupleValue (color);

so re-run cmake with

Build_examples OFF

Test pcl_viewer

test pcl_viewer

/usr/local/bin/pcl_viewer ~/program/pcl-1.8.1/test/car6.pcd

Success.

[screenshot: car point cloud in pcl_viewer]

Tips: screen snapshot by gnome-screenshot -a.

Cloud Viewer

cloud_viewer.cpp

#include <pcl/visualization/cloud_viewer.h>
#include <iostream>
#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>

int user_data;

void
viewerOneOff (pcl::visualization::PCLVisualizer& viewer)
{
    viewer.setBackgroundColor (1.0, 0.5, 1.0);
    pcl::PointXYZ o;
    o.x = 1.0;
    o.y = 0;
    o.z = 0;
    viewer.addSphere (o, 0.25, "sphere", 0);
    std::cout << "i only run once" << std::endl;
}

void
viewerPsycho (pcl::visualization::PCLVisualizer& viewer)
{
    static unsigned count = 0;
    std::stringstream ss;
    ss << "Once per viewer loop: " << count++;
    viewer.removeShape ("text", 0);
    viewer.addText (ss.str(), 200, 300, "text", 0);

    //FIXME: possible race condition here:
    user_data++;
}

int
main ()
{
    // car6: x y z
    // colored_cloud: x y z rgba
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::io::loadPCDFile ("colored_cloud.pcd", *cloud);

    pcl::visualization::CloudViewer viewer("Cloud Viewer");

    //blocks until the cloud is actually rendered
    viewer.showCloud(cloud);

    //use the following functions to get access to the underlying more advanced/powerful
    //PCLVisualizer

    //This will only get called once
    viewer.runOnVisualizationThreadOnce (viewerOneOff);

    //This will get called once per visualization iteration
    viewer.runOnVisualizationThread (viewerPsycho);
    while (!viewer.wasStopped ())
    {
        //you can also do cool processing here
        //FIXME: Note that this is running in a separate thread from viewerPsycho
        //and you should guard against race conditions yourself...
        user_data++;
    }
    return 0;
}

/*
http://pointclouds.org/documentation/tutorials/cloud_viewer.php
http://pointclouds.org/documentation/tutorials/pcl_visualizer.php
http://docs.pointclouds.org/1.7.0/structpcl_1_1_point_x_y_z_r_g_b.html
*/

CMakeLists.txt

cmake_minimum_required(VERSION 2.8 FATAL_ERROR)

project(cloud_viewer)

# set bin folder
set(CMAKE_BINARY_DIR ${CMAKE_SOURCE_DIR}/bin)
set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR})
set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR})

find_package(PCL 1.8.1 REQUIRED)

#message( [***] ${PCL_INCLUDE_DIRS})
#message( [***] ${PCL_LIBRARY_DIRS})
#message( [***] ${PCL_DEFINITIONS})
#message( [***] ${PCL_LIBRARIES})

include_directories(${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

add_executable (cloud_viewer cloud_viewer.cpp)
target_link_libraries (cloud_viewer ${PCL_LIBRARIES})

compile

mkdir build
cd build
cmake ..
make

run demo

bin folder

$ tree bin/

bin/
├── cloud_viewer
└── colored_cloud.pcd

0 directories, 2 files

run demo

./cloud_viewer

Code Example

see Part-4: Install and Configure PCL 1.8.1 with vtk qt support on windows 10 from source

Reference

for windows

History

  • 20180105: created.
  • 20180227: rewrite pcl compile part.

Series

Guide

  • qt: 5.7.0
  • qmake: 3.0
  • qtcreator: 3.5.1
  • vtk: 8.1.0 (from source)

install qt57

see Part-1: Install and Configure Qt5 on Ubuntu 16.04

install qt57 and qtcreator.

install vtk

download source

Download vtk source

wget https://www.vtk.org/files/release/8.1/VTK-8.1.0.tar.gz
wget https://www.vtk.org/files/release/8.1/VTKData-8.1.0.tar.gz

To integrate VTK with Qt for a nice graphical user interface, we need to turn on some options.

configure vtk with qt

cd VTK-8.1.0
mkdir build
cd build
cmake-gui ..

with options:

VTK_Group_Qt       ON
VTK_QT_VERSION     5 # default
QT5_DIR            /opt/qt/5.7/gcc_64/lib/cmake/Qt5

VTK_RENDERING_BACKEND OpenGL2 # default
BUILD_SHARED_LIBS  ON
CMAKE_BUILD_TYPE   Release
CMAKE_INSTALL_PREFIX /usr/local

set QT5_DIR to /opt/qt/5.7/gcc_64/lib/cmake/Qt5

download VTKData for the MD5 objects

Extract VTKData-8.1.0.tar.gz and copy the MD5 folder to VTK-8.1.0/build/ExternalData/Objects/MD5.

make and install

make -j8
sudo make -j8 install

install to /usr/local/include/vtk-8.1 and /usr/local/lib/cmake/vtk-8.1

install QVTKWidget plugin

  1. copy libQVTKWidgetPlugin.so to
    install path: /usr/lib/x86_64-linux-gnu/qt5/plugins/designer
cd build/lib
ls -al libQVTKWidgetPlugin.so

# copy to qt creator
sudo cp libQVTKWidgetPlugin.so /usr/lib/x86_64-linux-gnu/qt5/plugins/designer
  2. now list the designer plugins
ls /usr/lib/x86_64-linux-gnu/qt5/plugins/designer
libqquickwidget.so libQVTKWidgetPlugin.so libqwebview.so
  3. if we start qtcreator, we will see a QVTKWidget at the bottom of the VTK container in the Design layout.

QVTKWidget for qtcreator

VTK Example

CylinderRenderingProperties.cpp

#include <vtkCylinderSource.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkProperty.h>
#include <vtkCamera.h>
#include <vtkSmartPointer.h>

int main(int, char *[])
{
    // This creates a polygonal cylinder model with eight circumferential facets
    // (i.e., in practice an octagonal prism).
    vtkSmartPointer<vtkCylinderSource> cylinder =
        vtkSmartPointer<vtkCylinderSource>::New();
    cylinder->SetResolution(8);

    // The mapper is responsible for pushing the geometry into the graphics library.
    // It may also do color mapping, if scalars or other attributes are defined.
    vtkSmartPointer<vtkPolyDataMapper> cylinderMapper =
        vtkSmartPointer<vtkPolyDataMapper>::New();
    cylinderMapper->SetInputConnection(cylinder->GetOutputPort());

    // The actor is a grouping mechanism: besides the geometry (mapper), it
    // also has a property, transformation matrix, and/or texture map.
    // Here we set its color and rotate it around the X and Y axes.
    vtkSmartPointer<vtkActor> cylinderActor =
        vtkSmartPointer<vtkActor>::New();
    cylinderActor->SetMapper(cylinderMapper);
    cylinderActor->GetProperty()->SetColor(1.0000, 0.3882, 0.2784);
    cylinderActor->RotateX(30.0);
    cylinderActor->RotateY(-45.0);

    // The renderer generates the image
    // which is then displayed on the render window.
    // It can be thought of as a scene to which the actor is added
    vtkSmartPointer<vtkRenderer> renderer =
        vtkSmartPointer<vtkRenderer>::New();
    renderer->AddActor(cylinderActor);
    renderer->SetBackground(0.1, 0.2, 0.4);
    // Zoom in a little by accessing the camera and invoking its "Zoom" method.
    renderer->ResetCamera();
    renderer->GetActiveCamera()->Zoom(1.5);

    // The render window is the actual GUI window
    // that appears on the computer screen
    vtkSmartPointer<vtkRenderWindow> renderWindow =
        vtkSmartPointer<vtkRenderWindow>::New();
    renderWindow->SetSize(200, 200);
    renderWindow->AddRenderer(renderer);

    // The render window interactor captures mouse events
    // and will perform appropriate camera or actor manipulation
    // depending on the nature of the events.
    vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor =
        vtkSmartPointer<vtkRenderWindowInteractor>::New();
    renderWindowInteractor->SetRenderWindow(renderWindow);

    // This starts the event loop and as a side effect causes an initial render.
    renderWindowInteractor->Start();

    return EXIT_SUCCESS;
}

CMakeLists.txt

cmake_minimum_required(VERSION 2.8)

PROJECT(CylinderRenderingProperties)

find_package(VTK REQUIRED)
include(${VTK_USE_FILE})

# /usr/local/lib/cmake/vtk-8.0/UseVTK.cmake
# C:/Program Files/PCL 1.8.1/3rdParty/VTK/lib/cmake/vtk-8.0/UseVTK.cmake

message ([vtk] ${VTK_LIBRARIES})

add_executable(CylinderRenderingProperties MACOSX_BUNDLE CylinderRenderingProperties.cpp)

target_link_libraries(CylinderRenderingProperties ${VTK_LIBRARIES})

compile

mkdir build && cd build && cmake-gui ..
make -j8

Reference

History

  • 20180108: created.

Tutorial

build qt library

QObject class

class SHARED_EXPORT CameraGrabber : public QObject
{
    Q_OBJECT
signals:
    void sendFrameTrash(int nCameraID, int nTotalTrashFrame);
public:
    explicit CameraGrabber(QObject *parent = 0);
};
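SHARED_EXPORT here is an export macro for the DLL, not part of Qt itself. A typical definition (an assumption; the original macro and its guard name are not shown) follows the standard Qt pattern:

// shared_export.h (hypothetical header)
#include <QtGlobal>

#if defined(CAMERAGRABBER_LIBRARY)   // assumed guard, defined when building the DLL
#  define SHARED_EXPORT Q_DECL_EXPORT
#else                                // not defined when consuming the DLL
#  define SHARED_EXPORT Q_DECL_IMPORT
#endif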

CMakeLists.txt

set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(CMAKE_AUTOMOC ON) # for qt SLOTS

add_library(${CAMERA_GRABBER_TARGET}
    CaptureProcess.h
    CaptureProcess.cpp
    FindGrabber.h
    FindGrabber.cpp
    CameraGrabber.h
    CameraGrabber.cpp
)

qt5_use_modules(${CAMERA_GRABBER_TARGET} Core)

target_link_libraries (${CAMERA_GRABBER_TARGET}
    ${CONFIG_TARGET}
    ${SAPERA_LIBRARIES}
)

Build to get CameraGrabber.h, CameraGrabber.lib, CameraGrabber.dll, and mocs_compilation.cpp.

use qt library

MainWindow.h

#include "Sensor/CameraGrabber/CameraGrabber.h" 

class MainWindow : public QMainWindow
{
Q_OBJECT

public:
explicit MainWindow(QWidget *parent=0);
~MainWindow();

private:
CameraGrabber *graber = nullptr;
}

MainWindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"

// moc cpp files for QObject
#include "build/src/car/Sensor/CameraGrabber/MySensorCameraGrabber_autogen/mocs_compilation.cpp"

CMakeLists.txt

set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(CMAKE_AUTOMOC ON) # for qt project

#==============================================================
# cpp,ui,resource files
#==============================================================
# cpp files
#aux_source_directory(. SRC_LIST)
set(SRC_LIST
    main.cpp
    mainwindow.h
    mainwindow.cpp
)

# ui files
qt5_wrap_ui(ui_FILES mainwindow.ui)

# resource files
qt5_add_resources(qrc_FILES resource.qrc)

message( [Main] ${SRC_LIST} )
message( [Main] ${ui_FILES} )
message( [Main] ${qrc_FILES} )

#==============================================================
# generate and link target for point_cloud_viewer
#==============================================================
set(TARGET_NAME aa_qt_main)
add_executable(${TARGET_NAME} ${SRC_LIST} ${ui_FILES} ${qrc_FILES})

# link qt libraries
qt5_use_modules(${TARGET_NAME} Core Widgets OpenGL Xml Gui Sql)

# link vtk and pcl libraries
target_link_libraries(${TARGET_NAME}
    ${CONFIG_TARGET}
    ${UTIL_TARGET}
    ${MODEL_TARGET}
    ${DETECTION_TARGET}
    ${DATABASE_TARGET}

    # sensor
    ${CODER_LINKER_TARGET}
    ${RFID_TARGET}
    ${CAMERA_GRABBER_TARGET}
    ${LIDAR_GRABBER_TARGET}

    ${GTEST_LIBRARIES}
    ${BOOST_THREAD_LIBRARY}
    ${GLOG_LIBRARIES}

    ${Boost_LIBRARIES}
    ${OpenCV_LIBS}
    ${PCL_LIBRARIES}
)

tips

If we reference a Qt DLL that uses QObject, then when we include its header in MainWindow.h, we must also include mocs_compilation.cpp in MainWindow.cpp. Otherwise a link error will occur:

error LNK2001: unresolved external symbol "public: static struct QMetaObject const staticMetaObject

Reference

History

  • 20180409: created.

Series

Tutorial

  • qt: 5.7.0
  • qmake: 3.0 based on Qt 5.7.0
  • qtcreator: 3.5.1 based on Qt 5.5.1

purge existing qt

sudo apt-get purge qt5-default qtcreator 
sudo apt-get purge qt4-designer qt4-dev-tools

install qt57

In addition, building graphical Qt applications requires OpenGL libraries and headers installed. On Ubuntu and other Debian-based Linux systems you can get OpenGL and the minimal set of development tools by installing the packages libgl1-mesa-dev and build-essential, i.e. by running this command:

sudo apt-get install build-essential libgl1-mesa-dev

Download qt and install.

sudo apt-get install build-essential libgl1-mesa-dev
wget http://download.qt.io/official_releases/qt/5.7/5.7.0/qt-opensource-linux-x64-5.7.0.run
chmod +x qt-opensource-linux-x64-5.7.0.run
./qt-opensource-linux-x64-5.7.0.run

install to /opt/Qt5.7.0

create links to /opt/qt/

cd /opt
sudo ln -s /opt/Qt5.7.0 qt

qtchooser

add qt57.conf

cat /usr/share/qtchooser/qt57.conf 

/opt/qt/5.7/gcc_64/bin
/opt/qt/5.7/gcc_64/lib

# link to qt57
cd /usr/lib/x86_64-linux-gnu/qtchooser
sudo ln -s /usr/share/qtchooser/qt57.conf qt57.conf
cd /usr/lib/x86_64-linux-gnu/qt-default/qtchooser

sudo rm default.conf
sudo ln -s /usr/share/qtchooser/qt57.conf default.conf
ls -l default.conf
default.conf -> /usr/share/qtchooser/qt57.conf

qt conf

/usr/share/qtchooser/

  • qt4-x86_64-linux-gnu.conf (FILE)
  • qt5-x86_64-linux-gnu.conf (FILE)
  • qt57.conf (FILE)

/usr/lib/x86_64-linux-gnu/qtchooser/

  • 4.conf (link)
  • 5.conf (link)
  • qt4.conf (link)
  • qt5.conf (link)
  • qt57.conf (link)

/usr/lib/x86_64-linux-gnu/qt-default/qtchooser/

  • default.conf (link) qt57.conf

qtchooser

which qtchooser 
/usr/bin/qtchooser

qtchooser -l

4
5
default
qt4-x86_64-linux-gnu
qt4
qt5-x86_64-linux-gnu
qt5

qtchooser env

qtchooser -print-env
QT_SELECT="default"
QTTOOLDIR="/opt/qt/5.7/gcc_64/bin"
QTLIBDIR="/opt/qt/5.7/gcc_64/lib"
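The active configuration can be switched per shell via the QT_SELECT environment variable, using one of the names listed by qtchooser -l (a usage sketch):

export QT_SELECT=qt57
qmake -v # now reports the Qt 5.7 toolchain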

export QT_QPA_PLATFORM_PLUGIN_PATH

vim .bashrc
export QT_QPA_PLATFORM_PLUGIN_PATH=/opt/qt/5.7/gcc_64/plugins/platforms

Otherwise, errors like Failed to load platform plugin "xcb" may occur.

QtCreator Tutorial

install qtcreator

sudo apt-get -y install qtcreator

qtcreator -version
Qt Creator 3.5.1 based on Qt 5.5.1

create application

  1. Start qtcreator and create a Qt Widget Application named hello.

  2. Add a resource file named resource.qrc.

     hello ---> right click ---> Add New... ---> qt ---> qt resource file ---> name resource ---> generate resource.qrc
    
  3. and then import images to resource.qrc

     Resources | resource.qrc ---> right click ---> Add Prefix... ---> name /prefix
     Resources | resource.qrc | /prefix ---> right click ---> Add Existing Files... ---> choose images ---> OK
    
  4. use resource in mainwindow.ui

     button ---> property page ---> icon ---> Choose Resource... ---> select image ---> OK
    

qt slots

method 1

steps:

mainwindow.ui ---> choose button ---> right click ---> Go to slot... ---> choose clicked() ---> OK

This will add the slot to mainwindow.h automatically:

private slots:
void on_pushButtonOK_clicked();

and in mainwindow.cpp

void MainWindow::on_pushButtonOK_clicked()
{

}

Tips: if we use the on_pushButtonOK_clicked naming style (on_<objectName>_<signalName>), there is no need to connect the slot by hand in the MainWindow constructor; ui->setupUi(this) calls QMetaObject::connectSlotsByName(), which wires it up automatically.

method 2 (OK)

add slots in mainwindow.h by hand

private slots:
void pushButtonCancel_clicked();

and in mainwindow.cpp

void MainWindow::pushButtonCancel_clicked()
{

}

connect button with slot in mainwindow.cpp

MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // connect
    connect(ui->pushButtonCancel, SIGNAL(clicked()), this, SLOT(pushButtonCancel_clicked()));
}
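Since Qt 5 you can also use the pointer-to-member connect syntax, which is checked at compile time instead of at runtime (an alternative sketch, not what the code above uses):

// typos become build errors instead of silent runtime failures
connect(ui->pushButtonCancel, &QPushButton::clicked,
        this, &MainWindow::pushButtonCancel_clicked);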

Code Example

folder

folder structure like this:

$ tree hello/

hello/
├── CMakeLists.txt
├── hello.pro
├── hello.pro.user
├── images
│   ├── kezunlin_logo.png
│   ├── logo.svg
│   └── searchicon.png
├── main.cpp
├── mainwindow.cpp
├── mainwindow.h
├── mainwindow.ui
└── resource.qrc

1 directory, 11 files

code

mainwindow.h

#ifndef MAINWINDOW_H
#define MAINWINDOW_H

#include <QMainWindow>

namespace Ui {
class MainWindow;
}

class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    explicit MainWindow(QWidget *parent = 0);
    ~MainWindow();

private slots:
    void on_pushButtonOK_clicked(); // method1
    void pushButtonCancel_clicked(); // method2

private:
    Ui::MainWindow *ui;
};

#endif // MAINWINDOW_H

mainwindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"

#include <QMessageBox>

MainWindow::MainWindow(QWidget *parent) :
QMainWindow(parent),
ui(new Ui::MainWindow)
{
ui->setupUi(this);

// connect
connect(ui->pushButtonCancel, SIGNAL(clicked()), this, SLOT(pushButtonCancel_clicked()));
}

MainWindow::~MainWindow()
{
delete ui;
}

void MainWindow::on_pushButtonOK_clicked()
{
QString text = ui->lineEditName->text();
QMessageBox::information(this, "OK", text);
ui->pushButtonOK->setText( tr("(OK. click me)") );
}

void MainWindow::pushButtonCancel_clicked()
{
QString text = ui->lineEditName->text();
QMessageBox::information(this, "Cancel", text);
ui->pushButtonCancel->setText( tr("(Cancel.click me)") );
}

main.cpp

#include "mainwindow.h"
#include <QApplication>

int main(int argc, char *argv[])
{
QApplication a(argc, argv);
MainWindow w;
w.show();

return a.exec();
}

CMakeLists.txt

cmake_minimum_required(VERSION 2.8.8)

project(helloworld)

# Find includes in corresponding build directories
set(CMAKE_INCLUDE_CURRENT_DIR ON)
# Instruct CMake to run moc automatically when needed.
set(CMAKE_AUTOMOC ON)

# ${QT_INCLUDE_DIRS} ${QT_LIBRARIES} and so on are all Qt4 Macro Definitions!!!!!
# Find the QtWidgets library
find_package(Qt5Core)
find_package(Qt5Widgets)
find_package(Qt5Gui)
find_package(Qt5OpenGL)
find_package(Qt5Xml)

#message( [qt] ${Qt5Core_INCLUDE_DIRS} )
#message( [qt] ${Qt5Core_LIBRARIES} )
#message( [qt] ${Qt5Widgets_INCLUDE_DIRS} )
#message( [qt] ${Qt5Widgets_LIBRARIES} )

# cpp files
aux_source_directory(. SRC_LIST)

# ui files
qt5_wrap_ui(ui_FILES mainwindow.ui)
# resource files
qt5_add_resources(qrc_FILES resource.qrc)

message( [Main] ${SRC_LIST} ) # ./main.cpp ./mainwindow.cpp
message( [Main] ${ui_FILES} ) # build/ui_mainwindow.h
message( [Main] ${qrc_FILES} )# build/qrc_resource.cpp

# Tell CMake to create the helloworld executable
add_executable(${PROJECT_NAME} ${SRC_LIST} ${ui_FILES} ${qrc_FILES})

qt5_use_modules(${PROJECT_NAME} Core Widgets OpenGL Xml Gui)

# Use the Widgets module from Qt 5.
#qt5_use_modules(helloworld Widgets)

# link other libraries
#target_link_libraries (${PROJECT_NAME} ${SPEC_OPENCV_LIBS})

CMakeLists.txt 2

cmake_minimum_required(VERSION 3.0)

set(PROJECT_NAME demo)

set(CMAKE_BINARY_DIR ${CMAKE_SOURCE_DIR}/bin) # bin/

set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR})
set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR})

set(CMAKE_CXX_STANDARD 11)
#set(CMAKE_PREFIX_PATH ${QTDIR})
set(CMAKE_AUTOMOC ON)
set(CMAKE_AUTOUIC ON)
set(CMAKE_AUTORCC ON)

# QT5_DIR /opt/Qt5.7.0/5.7/gcc_64/lib/cmake/Qt5
find_package(Qt5 REQUIRED Widgets Core Gui Network OpenGL)
find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc features2d calib3d )
find_package(Protobuf REQUIRED) # 3.6.1
find_package(VTK REQUIRED) # 8.1.2
include(${VTK_USE_FILE})

MESSAGE( [Main] " VTK_INCLUDE_DIRS = ${VTK_INCLUDE_DIRS}")
MESSAGE( [Main] " VTK_LIBRARIES = ${VTK_LIBRARIES}")

find_package(PCL REQUIRED) # 1.9.1
#find_package(PCL REQUIRED COMPONENTS common io filters visualization)

link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

message ([main] "PCL_DIR = ${PCL_DIR}")
message ([main] "PCL_FOUND = ${PCL_FOUND}")
message ([main] "PCL_INCLUDE_DIRS = ${PCL_INCLUDE_DIRS}")
message ([main] "PCL_LIBRARIES = ${PCL_LIBRARIES}")

message ([main] "PCL_LIBRARY_DIRS = ${PCL_LIBRARY_DIRS}")
message ([main] "PCL_COMMON_LIBRARIES = ${PCL_COMMON_LIBRARIES}")
message ([main] "PCL_IO_LIBRARIES = ${PCL_IO_LIBRARIES}")
message ([main] "PCL_FILTERS_LIBRARIES = ${PCL_FILTERS_LIBRARIES}")
message ([main] "PCL_VISUALIZATION_LIBRARIES = ${PCL_VISUALIZATION_LIBRARIES}")

include_directories(
    ./ # current folder

    # ${GFLAGS_INCLUDE_DIRS}
    # ${GLOG_INCLUDE_DIRS}
    # ${GTEST_INCLUDE_DIRS}
    ${PROTOBUF_INCLUDE_DIRS}
    ${Boost_INCLUDE_DIRS}
    ${OpenCV_INCLUDE_DIRS}
    ${VTK_INCLUDE_DIRS}
    ${PCL_INCLUDE_DIRS}
)

set(SOURCE_FILES
    main.cpp
    MainWindow.cpp
    ./proto/camera_image.pb.cc
    ./proto/point_cloud.pb.cc
)

set(RESOURCE_FILE resource.qrc)

add_executable(${PROJECT_NAME} ${SOURCE_FILES} ${RESOURCE_FILE})

target_link_libraries(${PROJECT_NAME}
    Qt5::Widgets Qt5::Gui Qt5::Core Qt5::OpenGL Qt5::Network
    ${Boost_LIBRARIES}
    ${PROTOBUF_LIBRARIES}
    ${OpenCV_LIBRARIES}
    ${VTK_LIBRARIES}
    ${PCL_LIBRARIES}
    pthread
    rt
)

cmake

cd hello
mkdir build
cd build
cmake ..
make

run

./helloworld

screen snapshot like this:

qt demo window

Reference

History

  • 20180108: created.

Tutorial

network proxy

  • System wide: Network ---> None/Manual
  • chrome: cannot be set in the browser; it follows the system proxy
  • firefox: about:preferences ---> Network Proxy

eth0 config

rename the interface (e.g. ens33) back to eth0

errors may occur:

Ubuntu networking restart | cannot find device 'eth0'

check eth0

dmesg | grep eth 

[5.715564] e1000 0000:02:01.0 eth0: (PCI:66MHz:32-bit) 00:0c:29:7d:bf:43
[5.715573] e1000 0000:02:01.0 eth0: Intel(R) PRO/1000 Network Connection
[5.719709] e1000 0000:02:01.0 ens33: renamed from eth0

The interface was renamed from eth0 to ens33; we need to change it back to eth0.

solution

  1. edit grub and update

    $ sudo vim /etc/default/grub

    #GRUB_CMDLINE_LINUX=""
    GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"

    $ sudo update-grub
  2. edit network interfaces /etc/network/interfaces

    auto lo
    iface lo inet loopback

    auto eth0
    iface eth0 inet static
    address 192.168.1.77
    netmask 255.255.255.0
    gateway 192.168.1.1
  3. reboot

    sudo reboot now
  4. check eth0

    $ sudo ifconfig

    eth0 Link encap:Ethernet HWaddr 80:fa:5b:47:92:8a
    inet addr:192.168.1.77 Bcast:192.168.1.255 Mask:255.255.255.0
    UP BROADCAST MULTICAST MTU:1500 Metric:1
    RX packets:0 errors:0 dropped:0 overruns:0 frame:0
    TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)

    lo Link encap:Local Loopback
    inet addr:127.0.0.1 Mask:255.0.0.0
    inet6 addr: ::1/128 Scope:Host
    UP LOOPBACK RUNNING MTU:65536 Metric:1
    RX packets:4640 errors:0 dropped:0 overruns:0 frame:0
    TX packets:4640 errors:0 dropped:0 overruns:0 carrier:0
    collisions:0 txqueuelen:1000
    RX bytes:865530 (865.5 KB) TX bytes:865530 (865.5 KB)

OK. Now we have eth0 and wlan0 properly set.

ldconfig

libEGL

error

$ sudo ldconfig

/sbin/ldconfig.real: /usr/lib/nvidia-384/libEGL.so.1 is not a symbolic link
/sbin/ldconfig.real: /usr/lib32/nvidia-384/libEGL.so.1 is not a symbolic link

fix

sudo rm /usr/lib/nvidia-384/libEGL.so 
sudo rm /usr/lib/nvidia-384/libEGL.so.1

sudo ln -s /usr/lib/nvidia-384/libEGL.so.384.90 /usr/lib/nvidia-384/libEGL.so.1
sudo ln -s /usr/lib/nvidia-384/libEGL.so.1 /usr/lib/nvidia-384/libEGL.so

# fix lib32
sudo rm /usr/lib32/nvidia-384/libEGL.so
sudo rm /usr/lib32/nvidia-384/libEGL.so.1

sudo ln -s /usr/lib32/nvidia-384/libEGL.so.384.90 /usr/lib32/nvidia-384/libEGL.so.1
sudo ln -s /usr/lib32/nvidia-384/libEGL.so.1 /usr/lib32/nvidia-384/libEGL.so

list libEGL.so of lib

$ ls -al libEGL.so*

lrwxrwxrwx 1 root root 33 Jan 8 11:24 libEGL.so -> /usr/lib/nvidia-384/libEGL.so.1
lrwxrwxrwx 1 root root 38 Jan 8 11:24 libEGL.so.1 -> /usr/lib/nvidia-384/libEGL.so.384.90
-rw-r--r-- 1 root root 15012 Sep 20 08:44 libEGL.so.384.90

list libEGL.so of lib32

$ ls -al libEGL.so*

lrwxrwxrwx 1 root root 33 Jan 8 11:24 libEGL.so -> /usr/lib32/nvidia-384/libEGL.so.1
lrwxrwxrwx 1 root root 38 Jan 8 11:24 libEGL.so.1 -> /usr/lib32/nvidia-384/libEGL.so.384.90
-rw-r--r-- 1 root root 15012 Sep 20 08:44 libEGL.so.384.90

check

$ sudo ldconfig

OK.

Reference

History

  • 20180108: created
  • 20180222: add eth0 part.

Install Guide

version

ROS release    ROS version         Ubuntu version
2016.3         ROS Kinetic Kame    Ubuntu 16.04 (Xenial) / Ubuntu 15.10

quick commands

sudo sh -c '. /etc/lsb-release && echo "deb [arch=amd64] http://mirrors.ustc.edu.cn/ros/ubuntu/ $DISTRIB_CODENAME main" > /etc/apt/sources.list.d/ros-latest.list'

sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116

sudo apt-get update
sudo apt-get install ros-kinetic-desktop-full

sudo apt-get install python-rosinstall python-rosinstall-generator python-wstool build-essential

sudo rosdep init
rosdep update

echo "source /opt/ros/kinetic/setup.bash" >> ~/.bashrc
source ~/.bashrc

# test
roscore

update source

sudo apt-get update

Tsinghua source:
http://mirrors.tuna.tsinghua.edu.cn/ubuntu xenial

Configure ros source

ros official (NOT RECOMMENDED)

sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'

NOT RECOMMENDED: with this source, apt-get update may fail with a Hash Sum mismatch error.

ros china (RECOMMENDED)

sudo sh -c '. /etc/lsb-release && echo "deb [arch=amd64] http://mirrors.ustc.edu.cn/ros/ubuntu/ $DISTRIB_CODENAME main" > /etc/apt/sources.list.d/ros-latest.list'
sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116

cat ros-latest.list

deb [arch=amd64] http://mirrors.ustc.edu.cn/ros/ubuntu/ xenial main

Install ros

ROS, rqt, rviz, robot-generic libraries, 2D/3D simulators, navigation and 2D/3D perception

sudo apt-get update
sudo apt-get install ros-kinetic-desktop-full

#sudo apt-get install ros-kinetic-desktop
#sudo apt-get install ros-kinetic-ros-base

#sudo apt-get install ros-kinetic-<PACKAGE>
#sudo apt-get install ros-kinetic-slam-gmapping

#apt-cache search ros-kinetic

Initialize rosdep

Before you can use ROS, you will need to initialize rosdep. rosdep enables you to easily install system dependencies for source you want to compile and is required to run some core components in ROS.

sudo rosdep init
rosdep update

will output

reading in sources list data from /etc/ros/rosdep/sources.list.d
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/osx-homebrew.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/base.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/python.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/ruby.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/releases/fuerte.yaml
Query rosdistro index https://raw.githubusercontent.com/ros/rosdistro/master/index.yaml
Add distro "groovy"
Add distro "hydro"
Add distro "indigo"
Add distro "jade"
Add distro "kinetic"
Add distro "lunar"
updated cache in /home/kezunlin/.ros/rosdep/sources.cache

Environment setup

echo "source /opt/ros/kinetic/setup.bash" >> ~/.bashrc
source ~/.bashrc

check ROS

env | grep ROS
export | grep ROS
declare -x ROSLISP_PACKAGE_DIRECTORIES=""
declare -x ROS_DISTRO="kinetic"
declare -x ROS_ETC_DIR="/opt/ros/kinetic/etc/ros"
declare -x ROS_MASTER_URI="http://localhost:11311"
declare -x ROS_PACKAGE_PATH="/opt/ros/kinetic/share"
declare -x ROS_ROOT="/opt/ros/kinetic/share/ros"

Dependencies for building packages

sudo apt-get install python-rosinstall python-rosinstall-generator python-wstool build-essential

Test install

roscore

output

... logging to /home/kezunlin/.ros/log/b777db6c-ff85-11e8-93c2-80fa5b47928a/roslaunch-ke-17139.log
Checking log directory for disk usage. This may take awhile.
Press Ctrl-C to interrupt
Done checking log file disk usage. Usage is <1GB.

started roslaunch server http://ke:36319/
ros_comm version 1.12.14


SUMMARY
========

PARAMETERS
 * /rosdistro: kinetic
 * /rosversion: 1.12.14

NODES

auto-starting new master
process[master]: started with pid [17162]
ROS_MASTER_URI=http://ke:11311/

setting /run_id to b777db6c-ff85-11e8-93c2-80fa5b47928a
process[rosout-1]: started with pid [17175]
started core service [/rosout]
^C[rosout-1] killing on exit
[master] killing on exit
shutting down processing monitor...
... shutting down processing monitor complete
done

Create Workspace

Create

Let’s create and build a catkin workspace:

mkdir -p ~/catkin_ws/src
cd ~/catkin_ws/
catkin_make

ls .
build devel src

tree src folder

src/
└── CMakeLists.txt -> /opt/ros/kinetic/share/catkin/cmake/toplevel.cmake

0 directories, 1 file

tree devel folder

devel
├── env.sh
├── lib
├── setup.bash
├── setup.sh
├── _setup_util.py
└── setup.zsh

1 directory, 5 files

The catkin_make command is a convenience tool for working with catkin workspaces.
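Under the hood it is roughly equivalent to the standard CMake workflow (a sketch based on the catkin documentation; exact flags may differ by version):

cd ~/catkin_ws
mkdir -p build && cd build
cmake ../src -DCATKIN_DEVEL_PREFIX=../devel -DCMAKE_INSTALL_PREFIX=../install
make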

source devel setup

before source devel/setup.bash

env | grep ROS
ROS_ROOT=/opt/ros/kinetic/share/ros
ROS_PACKAGE_PATH=/opt/ros/kinetic/share
ROS_MASTER_URI=http://localhost:11311
ROSLISP_PACKAGE_DIRECTORIES=
ROS_DISTRO=kinetic
ROS_ETC_DIR=/opt/ros/kinetic/etc/ros

after source devel/setup.bash

env | grep ROS
ROS_ROOT=/opt/ros/kinetic/share/ros
ROS_PACKAGE_PATH=/home/kezunlin/catkin_ws/src:/opt/ros/kinetic/share
ROS_MASTER_URI=http://localhost:11311
ROSLISP_PACKAGE_DIRECTORIES=/home/kezunlin/catkin_ws/devel/share/common-lisp
ROS_DISTRO=kinetic
ROS_ETC_DIR=/opt/ros/kinetic/etc/ros

To make sure your workspace is properly overlayed by the setup script, make sure the ROS_PACKAGE_PATH environment variable includes the directory you're in.

echo $ROS_PACKAGE_PATH
/home/kezunlin/catkin_ws/src:/opt/ros/kinetic/share

Reference

History

  • 2018/01/04: created.

Tutorial

While Caffe is made for deep networks it can likewise represent “shallow” models like logistic regression for classification. We’ll do simple logistic regression on synthetic data that we’ll generate and save to HDF5 to feed vectors to Caffe. Once that model is done, we’ll add layers to improve accuracy. That’s what Caffe is about: define a model, experiment, and then deploy.

import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

import os
os.chdir('..')

import sys
sys.path.insert(0, './python')
import caffe

import h5py
import shutil
import tempfile

import sklearn
import sklearn.datasets
import sklearn.linear_model

import pandas as pd

Synthesize a dataset of 10,000 4-vectors for binary classification with 2 informative features and 2 noise features.

X, y = sklearn.datasets.make_classification(
    n_samples=10000, n_features=4, n_redundant=0, n_informative=2,
    n_clusters_per_class=2, hypercube=False, random_state=0
)
print 'data,', X.shape, y.shape # (10000, 4) (10000,) x0,x1,x2,x3, y

# Split into train and test
X, Xt, y, yt = sklearn.model_selection.train_test_split(X, y)
print 'train,', X.shape, y.shape # train: (7500, 4) (7500,)
print 'test,', Xt.shape, yt.shape # test: (2500, 4) (2500,)

# Visualize sample of the data
ind = np.random.permutation(X.shape[0])[:1000] # (7500,) ---> (1000,) x0,x1,x2,x3, y
df = pd.DataFrame(X[ind])
_ = pd.plotting.scatter_matrix(df, figsize=(9, 9), diagonal='kde', marker='o', s=40, alpha=.4, c=y[ind])
data, (10000, 4) (10000,)
train, (7500, 4) (7500,)
test, (2500, 4) (2500,)

scatter matrix

Learn and evaluate scikit-learn’s logistic regression with stochastic gradient descent (SGD) training. Time and check the classifier’s accuracy.

%%timeit
# Train and test the scikit-learn SGD logistic regression.
clf = sklearn.linear_model.SGDClassifier(
    loss='log', n_iter=1000, penalty='l2', alpha=5e-4, class_weight='balanced')

clf.fit(X, y)
yt_pred = clf.predict(Xt)
print('Accuracy: {:.3f}'.format(sklearn.metrics.accuracy_score(yt, yt_pred)))
Accuracy: 0.781
Accuracy: 0.781
Accuracy: 0.781
Accuracy: 0.781
1 loop, best of 3: 372 ms per loop

Save the dataset to HDF5 for loading in Caffe.

# Write out the data to HDF5 files in a temp directory.
# This file is assumed to be caffe_root/examples/hdf5_classification.ipynb
dirname = os.path.abspath('./examples/hdf5_classification/data')
if not os.path.exists(dirname):
    os.makedirs(dirname)

train_filename = os.path.join(dirname, 'train.h5')
test_filename = os.path.join(dirname, 'test.h5')

# HDF5DataLayer source should be a file containing a list of HDF5 filenames.
# To show this off, we'll list the same data file twice.
with h5py.File(train_filename, 'w') as f:
    f['data'] = X
    f['label'] = y.astype(np.float32)
with open(os.path.join(dirname, 'train.txt'), 'w') as f:
    f.write(train_filename + '\n')
    f.write(train_filename + '\n')

# HDF5 is pretty efficient, but can be further compressed.
comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
with h5py.File(test_filename, 'w') as f:
    f.create_dataset('data', data=Xt, **comp_kwargs)
    f.create_dataset('label', data=yt.astype(np.float32), **comp_kwargs)
with open(os.path.join(dirname, 'test.txt'), 'w') as f:
    f.write(test_filename + '\n')

Let’s define logistic regression in Caffe through Python net specification. This is a quick and natural way to define nets that sidesteps manually editing the protobuf model.

from caffe import layers as L
from caffe import params as P

def logreg(hdf5, batch_size):
    # logistic regression: data, matrix multiplication, and 2-class softmax loss
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.ip1 = L.InnerProduct(n.data, num_output=2, weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.ip1, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip1, n.label)
    return n.to_proto()

train_net_path = 'examples/hdf5_classification/logreg_auto_train.prototxt'
with open(train_net_path, 'w') as f:
    f.write(str(logreg('examples/hdf5_classification/data/train.txt', 10)))

test_net_path = 'examples/hdf5_classification/logreg_auto_test.prototxt'
with open(test_net_path, 'w') as f:
    f.write(str(logreg('examples/hdf5_classification/data/test.txt', 10)))

Now, we’ll define our “solver” which trains the network by specifying the locations of the train and test nets we defined above, as well as setting values for various parameters used for learning, display, and “snapshotting”.

from caffe.proto import caffe_pb2

def solver(train_net_path, test_net_path):
    s = caffe_pb2.SolverParameter()

    # Specify locations of the train and test networks.
    s.train_net = train_net_path
    s.test_net.append(test_net_path)

    s.test_interval = 1000 # Test after every 1000 training iterations.
    s.test_iter.append(250) # Test 250 "batches" each time we test.

    s.max_iter = 10000 # # of times to update the net (training iterations)

    # Set the initial learning rate for stochastic gradient descent (SGD).
    s.base_lr = 0.01

    # Set `lr_policy` to define how the learning rate changes during training.
    # Here, we 'step' the learning rate by multiplying it by a factor `gamma`
    # every `stepsize` iterations.
    s.lr_policy = 'step'
    s.gamma = 0.1
    s.stepsize = 5000

    # Set other optimization parameters. Setting a non-zero `momentum` takes a
    # weighted average of the current gradient and previous gradients to make
    # learning more stable. L2 weight decay regularizes learning, to help prevent
    # the model from overfitting.
    s.momentum = 0.9
    s.weight_decay = 5e-4

    # Display the current training loss and accuracy every 1000 iterations.
    s.display = 1000

    # Snapshots are files used to store networks we've trained. Here, we'll
    # snapshot every 10K iterations -- just once at the end of training.
    # For larger networks that take longer to train, you may want to set
    # snapshot < max_iter to save the network and training state to disk during
    # optimization, preventing disaster in case of machine crashes, etc.
    s.snapshot = 10000
    s.snapshot_prefix = 'examples/hdf5_classification/data/train'

    # We'll train on the CPU for fair benchmarking against scikit-learn.
    # Changing to GPU should result in much faster training!
    s.solver_mode = caffe_pb2.SolverParameter.CPU

    return s

solver_path = 'examples/hdf5_classification/logreg_solver.prototxt'
with open(solver_path, 'w') as f:
    f.write(str(solver(train_net_path, test_net_path)))

Time to learn and evaluate our Caffeinated logistic regression in Python.

%%timeit
caffe.set_mode_cpu()
solver = caffe.get_solver(solver_path)
solver.solve()

accuracy = 0
batch_size = solver.test_nets[0].blobs['data'].num
test_iters = int(len(Xt) / batch_size)
for i in range(test_iters):
    solver.test_nets[0].forward()
    accuracy += solver.test_nets[0].blobs['accuracy'].data
accuracy /= test_iters

print("Accuracy: {:.3f}".format(accuracy))
Accuracy: 0.770
Accuracy: 0.770
Accuracy: 0.770
Accuracy: 0.770
1 loop, best of 3: 195 ms per loop

Do the same through the command line interface for detailed output on the model and solving.

!./build/tools/caffe train -solver examples/hdf5_classification/logreg_solver.prototxt
I0224 00:32:03.232779   655 caffe.cpp:178] Use CPU.
I0224 00:32:03.391911   655 solver.cpp:48] Initializing solver from parameters: 
train_net: "examples/hdf5_classification/logreg_auto_train.prototxt"
test_net: "examples/hdf5_classification/logreg_auto_test.prototxt"
......
I0224 00:32:04.087514   655 solver.cpp:406]     Test net output #0: accuracy = 0.77
I0224 00:32:04.087532   655 solver.cpp:406]     Test net output #1: loss = 0.593815 (* 1 = 0.593815 loss)
I0224 00:32:04.087541   655 solver.cpp:323] Optimization Done.
I0224 00:32:04.087548   655 caffe.cpp:222] Optimization Done.

If you look at output or the logreg_auto_train.prototxt, you’ll see that the model is simple logistic regression.
We can make it a little more advanced by introducing a non-linearity between weights that take the input and weights that give the output – now we have a two-layer network.
That network is given in nonlinear_auto_train.prototxt, and that’s the only change made in nonlinear_logreg_solver.prototxt which we will now use.

The final accuracy of the new network should be higher than logistic regression!

from caffe import layers as L
from caffe import params as P

def nonlinear_net(hdf5, batch_size):
    # one small nonlinearity, one leap for model kind
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # define a hidden layer of dimension 40
    n.ip1 = L.InnerProduct(n.data, num_output=40, weight_filler=dict(type='xavier'))
    # transform the output through the ReLU (rectified linear) non-linearity
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    # score the (now non-linear) features
    n.ip2 = L.InnerProduct(n.ip1, num_output=2, weight_filler=dict(type='xavier'))
    # same accuracy and loss as before
    n.accuracy = L.Accuracy(n.ip2, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()

train_net_path = 'examples/hdf5_classification/nonlinear_auto_train.prototxt'
with open(train_net_path, 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/train.txt', 10)))

test_net_path = 'examples/hdf5_classification/nonlinear_auto_test.prototxt'
with open(test_net_path, 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/test.txt', 10)))

solver_path = 'examples/hdf5_classification/nonlinear_logreg_solver.prototxt'
with open(solver_path, 'w') as f:
    f.write(str(solver(train_net_path, test_net_path)))
%%timeit
caffe.set_mode_cpu()
solver = caffe.get_solver(solver_path)
solver.solve()

accuracy = 0
batch_size = solver.test_nets[0].blobs['data'].num
test_iters = int(len(Xt) / batch_size)
for i in range(test_iters):
    solver.test_nets[0].forward()
    accuracy += solver.test_nets[0].blobs['accuracy'].data
accuracy /= test_iters

print("Accuracy: {:.3f}".format(accuracy))
Accuracy: 0.838
Accuracy: 0.837
Accuracy: 0.838
Accuracy: 0.834
1 loop, best of 3: 277 ms per loop

Do the same through the command line interface for detailed output on the model and solving.

!./build/tools/caffe train -solver examples/hdf5_classification/nonlinear_logreg_solver.prototxt
I0224 00:32:05.654265   658 caffe.cpp:178] Use CPU.
I0224 00:32:05.810444   658 solver.cpp:48] Initializing solver from parameters: 
train_net: "examples/hdf5_classification/nonlinear_auto_train.prototxt"
test_net: "examples/hdf5_classification/nonlinear_auto_test.prototxt"
......
I0224 00:32:06.078208   658 solver.cpp:406]     Test net output #0: accuracy = 0.8388
I0224 00:32:06.078225   658 solver.cpp:406]     Test net output #1: loss = 0.382042 (* 1 = 0.382042 loss)
I0224 00:32:06.078234   658 solver.cpp:323] Optimization Done.
I0224 00:32:06.078241   658 caffe.cpp:222] Optimization Done.
# Clean up (comment this out if you want to examine the hdf5_classification/data directory).
shutil.rmtree(dirname)

Reference

History

  • 20180102: created.

Series

Hexo Tutorial

Github recommends using Jekyll to manage static pages, but Jekyll is based on Ruby and is difficult to install and configure. So we use Hexo instead. Hexo is a static blog framework similar to Jekyll, based on Node.js and easier for us to use.

use Github to create repo

  1. create a new repo on github, named username.github.io: kezunlin.github.io

  2. Settings | Github Pages, choose a theme and deploy.

install nodejs by apt-get

sudo apt-get -y install nodejs
sudo apt-get -y install nodejs-legacy
sudo apt-get -y install npm
node -v
npm -v

install nodejs from source

# download and compile
wget https://nodejs.org/dist/v8.9.3/node-v8.9.3.tar.gz
tar xzvf node-v8.9.3.tar.gz
cd node-v8.9.3
./configure
make -j8
sudo make install

# link to /usr/bin
sudo ln -s /usr/local/bin/node /usr/bin/node
sudo ln -s /usr/local/bin/npm /usr/bin/npm

# check version
node -v
npm -v

test node

cat hello.js
console.log('Hello World');

node hello.js
Hello World

install hexo

# install hexo globally
sudo npm install hexo-cli -g
#sudo npm install hexo --save

# use cnpm from taobao instead of the official npm registry, which is slow for Chinese users.
sudo npm install -g cnpm --registry=https://registry.npm.taobao.org

use cnpm instead of npm (optional)

# cnpm install 
sudo cnpm install hexo-cli -g

# check version
hexo -v

create hexo site

cd workspace
mkdir blog
cd blog

hexo init
#npm install
hexo generate
hexo server

now we can visit localhost:4000 and create posts.

deploy to github

vim blog/_config.yml

deploy:
  type: git
  repo: git@github.com:kezunlin/kezunlin.github.io.git
  branch: master

generate ssh-key and copy to github

# generate ssh-key 
cd ~
ssh-keygen
cat .ssh/id_rsa.pub

# copy content to github
# https://github.com/settings/keys

# install plungin and deploy to github
npm install hexo-deployer-git --save
hexo deploy

ssh-keygen -t rsa -C "your_email@example.com"

now we can visit https://kezunlin.github.io/

add README and skip render

  1. add README.md to source folder

  2. edit blog/_config.yml to skip render README.md

    skip_render:
    - README.md
  3. use hexo generate to copy README.md from source/ to public/

new post and deploy again

hexo new 'first post'
vim source/_posts/first-post.md

hexo generate
hexo server
hexo deploy

now we can visit https://kezunlin.github.io/ and see our first post.

Appendix

hexo commands

Hexo common commands:

hexo new "postName"       #new post
hexo new page "pageName" #new page
hexo generate #generate static files to public/
hexo server #start server on localhost:4000
hexo deploy #push .deploy_git/ to GitHub
hexo clean #clean files

Hexo short commands:

hexo n == hexo new
hexo g == hexo generate
hexo s == hexo server
hexo d == hexo deploy

Hexo composite commands:

hexo server -g
hexo deploy -g

front-matter

---
title: Using Github Pages and Hexo to manage personal blogs
date: 2017-12-26 17:28:10
categories: tutorial
tags:
- github pages
- hexo
- nodejs
- npm
---

see front-matter details

more

use <!--more--> inside a post to mark where the index-page excerpt ends

Use next theme

cd blog
git clone https://github.com/iissnan/hexo-theme-next themes/next

vim blog/_config.yml

#theme: landscape
theme: next

Avatar

edit blog\themes\next\_config.yml

avatar: /images/avatar.jpg
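The path is resolved against the theme's source folder, so the image itself goes under themes/next/source/images/ (the source file name here is just an example):

mkdir -p themes/next/source/images
cp ~/avatar.jpg themes/next/source/images/avatar.jpg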

Plugins

install plugin by

npm install <plugin-name> --save

hexo admin

cnpm install --save hexo-admin

now we can visit http://localhost:4000/admin/

git deployer

npm install hexo-deployer-git --save

rss feed

npm install hexo-generator-feed --save

visit http://localhost:4000/atom.xml

sitemap

npm install hexo-generator-sitemap --save

vim blog/_config.yml

sitemap:
  path: sitemap.xml

now we can visit http://localhost:4000/sitemap.xml

baidu sitemap

npm install hexo-generator-baidu-sitemap --save

vim blog/_config.yml

baidusitemap:
  path: baidusitemap.xml

now we can visit http://localhost:4000/baidusitemap.xml

abbrlink

cnpm install hexo-abbrlink --save

edit blog\_config.yml

permalink: post/:abbrlink/
abbrlink:
  alg: crc32 # crc16(default) and crc32
  rep: hex # dec(default) and hex

hexo generate will then fill an abbrlink field into your post.md front-matter:

---
title: Hello World
categories:
- tutorial
tags:
- hexo
abbrlink: 4a17b156
date: 2017-12-26 17:20:10
---

index/archive/category/tag

npm install hexo-generator-index --save
npm install hexo-generator-archive --save
npm install hexo-generator-category --save
npm install hexo-generator-tag --save

tags list page

hexo new page "tags"
# generate source/tags/index.md

edit source/tags/index.md

---
title: tags
date: 2017-12-27 15:46:09
type: "tags"
---

now we can visit http://localhost:4000/tags/

categories list page

hexo new page "categories"
# generate source/categories/index.md

edit source/categories/index.md

---
title: categories
date: 2017-12-27 15:46:03
type: "categories"
---

now we can visit http://localhost:4000/categories/

install search plugin

cnpm install hexo-generator-search --save

edit themes\next\_config.yml

local_search:
  enable: true # create a new 'Search' button next to 'Archives'
  # if auto, trigger search by changing input
  # if manual, trigger search by pressing enter key or search button
  trigger: auto
  # show top n results per article, show all results by setting to -1
  top_n_per_article: 1

edit blog\_config.yml

search:
  path: search.xml
  field: post
  format: html
  limit: 10000
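After regenerating, the search index should be emitted at the configured path (a quick check, assuming hexo server is still running on the default port):

hexo generate
curl -sI http://localhost:4000/search.xml | head -n 1  # expect an HTTP 200 status line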

Upload images to cnblogs

  1. install the img-uploader Chrome extension from here
  2. upload an image and get its url.
  3. use the url in markdown:

![alt](http://images2017.cnblogs.com/.../123.png "title")

Multiple deploy

  • deploy to github.com: username.github.io
  • deploy to coding.net (formerly GitCafe): username

vim blog/_config.yml

deploy:
  type: git
  repo:
    github: [email protected]:<username>/<username>.github.io.git,master
    coding: [email protected]:<username>/<username>.git,master
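With this config a single deploy pushes master to both remotes; a sketch of the usual cycle:

hexo clean
hexo generate
hexo deploy  # pushes to both the github and coding repos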

Advanced

custom domain and https

  • blog: GitHub Pages
  • SSL: CloudFlare
  • domain: Godaddy (dns nameservers from CloudFlare)

get ips by

dig kezunlin.github.io +noall +answer

185.199.108.153
185.199.109.153
185.199.110.153
185.199.111.153
  • A: point to 185.199.108.153, 185.199.109.153, 185.199.110.153, 185.199.111.153
  • CNAME: point to kezunlin.me
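Once these records are live, the custom domain should resolve as well (a hedged check: with CloudFlare's proxy enabled, the answers will be CloudFlare edge IPs rather than the GitHub Pages IPs above):

dig kezunlin.me +noall +answer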

steps:

  1. get kezunlin.me from Godaddy.com
  2. add kezunlin.me to the blog\source\CNAME file of the github repo
  3. register CloudFlare.com and add A record with github page IP 185.199.108/109/110/111.153
    cloudflare a records
  4. Go to Godaddy.com and add dns nameservers dina.ns.cloudflare.com and paul.ns.cloudflare.com from here
    godaddy dns
  5. wait for some hours (up to 24) and we get the following status from CloudFlare:
kezunlin.me
Status: Active

This website is active on Cloudflare.

Universal SSL Status Active Certificate

Active means the nameservers have taken effect.

configure Page Rules and add a rule for Always Use HTTPS.

google analytics

  1. get google-site-verification from google search console and add to themes/next/layout/_partials/head.swig

    <meta name="google-site-verification" content="***" />

  2. get google_analytics and edit themes\next\_config.yml

    google_analytics: UA-***

google adsense

  • google-adsense-header.js
  • google-adsense-display.js
  • google-adsense-article.js

google-adsense-header.js

<script data-ad-client="ca-pub-5653382914441020" async src="https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>

baidu zhanzhang

get baidu-site-verification from https://ziyuan.baidu.com/ and add to themes/next/layout/_partials/head.swig

<meta name="baidu-site-verification" content="***" />

360 zhanzhang

get 360-site-verification from http://zhanzhang.so.com/sitetool/ and add to themes/next/layout/_partials/head.swig

<meta name="360-site-verification" content="***" />

nofollow

cnpm install hexo-filter-nofollow --save

edit blog\_config.yml

nofollow:
  enable: true
  field: site
  exclude:
  - 'exclude1.com'
  - 'exclude2.com'

see hexo-filter-nofollow

code highlight

cnpm install --save hexo-prism-plugin

edit blog\_config.yml

highlight:
  enable: false

prism_plugin:
  mode: 'preprocess' # realtime/preprocess
  theme: 'tomorrow'
  line_number: false # default false
  custom_css:

see prismjs and matery

permalink pinyin

cnpm i hexo-permalink-pinyin --save

edit blog\_config.yml

permalink_pinyin:
  enable: true
  separator: '-' # default: '-'

this only affects post urls that contain Chinese words.

recommend posts

install plugin

npm install hexo-recommended-posts --save
hexo recommend

edit blog\_config.yml

recommended_posts:
  autoDisplay: false # automatically show recommended posts at the bottom of each post; set to false if you use the Material-X theme
  server: https://api.truelaurel.com # backend recommendation server address
  timeoutInMillis: 10000 # service timeout; beyond this, fall back to offline recommendation mode
  excludePattern: []
  titleHtml: Related Recommend Posts # custom title
  internalLinks: 4 # number of internal posts
  externalLinks: 1 # number of external posts
  fixedNumber: false
Alternatively, use the related popular posts plugin:

npm install hexo-related-popular-posts --save

see here

pin top post

cnpm install hexo-generator-index-pin-top --save 

and edit _posts/your-post.md

---
title: hexo blog
top: 1
tags:
- hexo
categories:
- blog
---

see here

404

  1. hexo new page "404"
  2. edit blog\source\404\index.md (see the sketch after this list)
  3. hexo generate to generate blog\public\404.html
  4. hexo deploy to deploy blog to github.
  5. now we can access https://kezunlin.me/404.html
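A minimal source/404/index.md could look like this (the page content is just an example):

cat > source/404/index.md <<'EOF'
---
title: Page Not Found
---
Sorry, the page you requested does not exist.
EOF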

when a 404 error occurs, GitHub will serve https://kezunlin.me/404.html as the result.

stats busuanzi

edit /theme/next/layout/_third-party/analytics/busuanzi-counter.swig

replace

<script async src="https://dn-lbstatics.qbox.me/busuanzi/2.3/busuanzi.pure.mini.js"></script>

with

<script async src="https://busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script>

edit blog\themes\next\_config.yml

busuanzi_count:
  # count values only if the other configs are false
  enable: true
  # custom uv span for the whole site
  site_uv: true
  site_uv_header: <i class="fa fa-user"></i>
  site_uv_footer: Visitors
  # custom pv span for the whole site
  site_pv: true
  site_pv_header: <i class="fa fa-eye"></i>
  site_pv_footer: Total Visits
  # custom pv span for one page only
  page_pv: true
  page_pv_header: <i class="fa fa-eye"></i>
  page_pv_footer: Reads

gitment for comment (not)

We can use a GitHub repo to store the blog's comments as issues.

register OAuth Application

  1. visit https://github.com/settings/applications/new
  2. fill in blanks, callback URL: https://kezunlin.me
  3. get client ID and client secret

go to https://github.com/settings/developers to check your OAuth Apps

gitment config

  1. create a new repo named gitment in Github for storing comments in issues
  2. edit blog\themes\next\_config.yml
gitment:
  enable: true
  mint: true # RECOMMEND, A mint on Gitment, to support count, language and proxy_gateway
  count: true # Show comments count in post meta area
  lazy: true # Comments lazy loading with a button
  cleanly: false # Hide 'Powered by ...' on footer, and more
  language: zh-Hans # Force language, or auto switch by theme
  github_user: kezunlin # MUST HAVE, Your Github ID
  github_repo: gitment # MUST HAVE, The repo you use to store Gitment comments
  client_id: xxx # MUST HAVE, Github client id for the Gitment
  client_secret: yyy # EITHER this or proxy_gateway, Github access secret token for the Gitment
  proxy_gateway: # Address of api proxy, See: https://github.com/aimingoo/intersect
  redirect_protocol: # Protocol of redirect_uri with force_redirect_protocol when mint enabled

Notice

  • github_user: kezunlin
  • github_repo: gitment

init page comment

  1. hexo deploy to deploy blogs
  2. visit page and click button Initialize Comment
  3. post your first comment.

error fix: https://github.com/imsun/gitment/issues/188

gitalk for comment

see gitalk for hexo next

valine

skip now.

copy code (not)

customize hexo

shareJS

see here

optimize for speed

multiple deploy

deploy to coding.net.

hexo-neat

cnpm install hexo-neat --save

edit blog\_config.yml

# hexo-neat
neat_enable: true

neat_html:
  enable: true
  exclude:

neat_css:
  enable: true
  exclude:
  - '*.min.css'

neat_js:
  enable: false
  mangle: true
  output:
  compress:
  exclude:
  - '*.min.js'
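A rough way to measure the effect is to compare the generated site size before and after enabling the plugin (a sketch; numbers vary by site):

hexo clean && hexo generate
du -sh public/  # compare against the size before enabling hexo-neat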

hexo-all-minifier (not)

npm install hexo-all-minifier --save

edit blog\_config.yml

all_minifier: true

Tip: do not enable the minifier for now, because it is not stable.

hexo-filter-optimize (not)

cnpm install hexo-filter-optimize

and edit blog\themes\next\_config.yml

filter_optimize:
  enable: true

Lazy load

Lazy load offscreen images with lazysizes

cdn

skip

multi language

seo tools

a very good reference here

get alexa rank

access alexa rank for kezunlin.me

output

<!--
Need more Alexa data? Find our APIs here: https://aws.amazon.com/alexa/
-->
<ALEXA VER="0.9" URL="kezunlin.me/" HOME="0" AID="=" IDN="kezunlin.me/">
<RLS PREFIX="http://" more="0"> </RLS>
<SD TITLE="A" FLAGS="" HOST="kezunlin.me"> </SD>
<SD>
<POPULARITY URL="kezunlin.me/" TEXT="4070983" SOURCE="panel"/>
<REACH RANK="3600610"/>
<RANK DELTA="+1226051"/>
</SD>
</ALEXA>

kezunlin.me alexa rank is 4070983.

image resource

Errors

hexo Error watch ENOSPC

> hexo server 
(node:7563) [DEP0061] DeprecationWarning: fs.SyncWriteStream is deprecated.
INFO Start processing
FATAL Something's wrong. Maybe you can find the solution here: http://hexo.io/docs/troubleshooting.html
Error: watch /media/kezunlin/Workspace/git/blog/source/_posts ENOSPC

solution:

echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
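This raises the inotify watch limit so hexo can watch a large source tree; the new value can be verified with a quick check:

cat /proc/sys/fs/inotify/max_user_watches  # expect 524288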

ERROR Local hexo not found in

hexo -v
ERROR Local hexo not found in /home/kezunlin/git/blog

solution:

cd blog
rm -rf node_modules
cnpm install --save

cache

google chrome force reload page without cache

steps

F12->Application->Clear Storage->Clear site data

clear site data

Reference

History

  • 2017/12/26: created.
  • 2017/12/27: add Appendix, use next theme, add tags/categories page.
  • 2017/12/28: add Advanced, use gitment, baidushare, local search, etc.
  • 2018/01/02: upload images to cnblogs.
  • 2018/01/03: hexo-neat to compress, cdn, etc.
  • 2018/01/22: add part2.
  • 2018/09/05: add ssl.
  • 2019/11/07: reorganize post contents.