# - Try to find GTEST
#
# The following variables are optionally searched for defaults
#   GTEST_ROOT_DIR: Base directory where all GTEST components are found
#
# The following are set after configuration is done:
#   GTEST_FOUND
#   GTEST_INCLUDE_DIRS
#   GTEST_LIBRARIES
#   GTEST_LIBRARY_DIRS
include(FindPackageHandleStandardArgs)
set(GTEST_ROOT_DIR "" CACHE PATH "Folder contains Google gtest")
# Link runTests with what we want to test and the GTest and pthread library
add_executable(runTests tests.cpp)
target_link_libraries(runTests ${GTEST_LIBRARIES} ${BOOST_THREAD_LIBRARY})
#target_link_libraries(runTests gtest pthread)
compile
mkdir build && cd build && cmake-gui ..
make -j8
run
./runTests
link (optional)
use -lgtest as a linker flag and, if you did not write your own test main routine, also the explicit -lgtest_main flag.
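If you do write your own main routine (so -lgtest_main is not needed), a minimal sketch looks like this; the add function and test name are illustrative, only the gtest calls are real API:

#include <gtest/gtest.h>

// trivial function under test (illustrative)
int add(int a, int b) { return a + b; }

TEST(AddTest, HandlesPositiveNumbers)
{
    EXPECT_EQ(5, add(2, 3));
}

// hand-written main, replacing the one that -lgtest_main would provide
int main(int argc, char **argv)
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}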
/*
Fractional-second format flags (second / millisecond / microsecond / nanosecond):
  %f  Fractional seconds are always used, even when their value is zero:  "13:15:16.000000"
  %F  Fractional seconds are used only when their value is not zero:      "13:15:16", "05:04:03.001234"
*/
#include <sstream>
#include <string>
#include <boost/date_time/posix_time/posix_time.hpp>

using boost::posix_time::time_facet;

// https://stackoverflow.com/questions/5018188/how-to-format-a-datetime-to-string-using-boost
std::string ptime_2_str_name(boost::posix_time::ptime now)
{
    // for multiple use
    std::stringstream ss;
    //static std::locale loc(std::cout.getloc(), new time_facet("%Y%m%d_%H%M%S_%f"));
    static std::locale loc(ss.getloc(), new time_facet("%Y%m%d_%H%M%S_%f"));
    ss.imbue(loc);
    ss << now;
    return ss.str(); // 20180118_111501_208797
}
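A quick usage sketch for the helper above (this main is illustrative and assumes ptime_2_str_name is in scope; microsec_clock::local_time() supplies the current local time):

#include <iostream>
#include <boost/date_time/posix_time/posix_time.hpp>

int main()
{
    // current local time with microsecond resolution
    boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time();
    std::cout << ptime_2_str_name(now) << std::endl; // e.g. 20180118_111501_208797
    return 0;
}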
# Specify the minimum version for CMake
cmake_minimum_required(VERSION 2.8)

# Project's name
project(date_time)

# Set the output folder where your program will be created
set(CMAKE_BINARY_DIR ${CMAKE_SOURCE_DIR}/bin)
set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR})
set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR})
wget https://www.coin-or.org/download/source/metslib/metslib-0.5.3.tgz
tar xzvf metslib-0.5.3.tgz
cd metslib-0.5.3
./configure
make
sudo make install
glxinfo
install
sudo apt-get install mesa-utils
glxinfo
possible error
X Error of failed request:  BadRequest (invalid request code or no such operation)
  Major opcode of failed request:  154 (GLX)
  Minor opcode of failed request:  34 ()
  Serial number of failed request:  34
  Current serial number in output stream:  33
Fix: make sure the NVIDIA drivers are installed successfully and there is no conflict.

check display OpenGL info
$ glxinfo | grep OpenGL
OpenGL vendor string: NVIDIA Corporation
OpenGL renderer string: GeForce GTX 1060/PCIe/SSE2
OpenGL core profile version string: 4.5.0 NVIDIA 384.90
OpenGL core profile shading language version string: 4.50 NVIDIA
OpenGL core profile context flags: (none)
OpenGL core profile profile mask: core profile
OpenGL core profile extensions:
OpenGL version string: 4.5.0 NVIDIA 384.90
OpenGL shading language version string: 4.50 NVIDIA
OpenGL context flags: (none)
OpenGL profile mask: (none)
OpenGL extensions:
OpenGL ES profile version string: OpenGL ES 3.2 NVIDIA 384.90
OpenGL ES profile shading language version string: OpenGL ES GLSL ES 3.20
OpenGL ES profile extensions:
This means the NVIDIA display is using OpenGL version 4.5.0.
Alternatively, check via nvidia-settings: NVIDIA X Server Settings -> X Screen 0 -> OpenGL/GLX Information.
QT_USE_FILE /home/kezunlin/program/pcl-1.8.1/build/use-qt5.cmake
VTK_DIR /usr/local/lib/cmake/vtk-8.1
CMAKE_BUILD_TYPE Release
CMAKE_CONFIGURATION_TYPES Release
CMAKE_INSTALL_PREFIX /usr/local
PCL_SHARED_LIBS ON
PCL_QT_VERSION 5
PCL_ENABLE_SSE ON
Build_visualization ON
Build_apps ON
Build_examples OFF # error may occur
Using CPU native flags for SSE optimization: -march=native
make and install
# it may take several minutes, wait ...
make -j8
sudo make -j8 install
cmake installs to /usr/local/bin, /usr/local/lib, and /usr/local/include/pcl-1.8; PCL_DIR will be /usr/local/share/pcl-1.8.
example error
/home/kezunlin/program/pcl-1.8.1/examples/segmentation/example_cpc_segmentation.cpp:493:17: error: ‘class vtkUnsignedCharArray’ has no member named ‘InsertNextTupleValue’
colors->InsertNextTupleValue (color);
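This happens because newer VTK (7.1+) replaced InsertNextTupleValue with InsertNextTypedTuple. A hedged fix sketch (appendColor is a hypothetical helper; only the two Insert* calls are VTK API):

#include <vtkVersion.h>
#include <vtkSmartPointer.h>
#include <vtkUnsignedCharArray.h>

// Append one RGB tuple, handling the VTK 7.1 rename of InsertNextTupleValue.
void appendColor(vtkSmartPointer<vtkUnsignedCharArray> colors,
                 const unsigned char color[3])
{
#if VTK_MAJOR_VERSION > 7 || (VTK_MAJOR_VERSION == 7 && VTK_MINOR_VERSION >= 1)
    colors->InsertNextTypedTuple(color);
#else
    colors->InsertNextTupleValue(color);
#endif
}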
#include <pcl/visualization/cloud_viewer.h>
#include <iostream>
#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>

int user_data;

void viewerOneOff(pcl::visualization::PCLVisualizer& viewer)
{
    viewer.setBackgroundColor(1.0, 0.5, 1.0);
    pcl::PointXYZ o;
    o.x = 1.0;
    o.y = 0;
    o.z = 0;
    viewer.addSphere(o, 0.25, "sphere", 0);
    std::cout << "i only run once" << std::endl;
}

void viewerPsycho(pcl::visualization::PCLVisualizer& viewer)
{
    static unsigned count = 0;
    std::stringstream ss;
    ss << "Once per viewer loop: " << count++;
    viewer.removeShape("text", 0);
    viewer.addText(ss.str(), 200, 300, "text", 0);

    //FIXME: possible race condition here:
    user_data++;
}

int main()
{
    // car6 x y z
    // colored_cloud x y z rbga
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::io::loadPCDFile("colored_cloud.pcd", *cloud);

    pcl::visualization::CloudViewer viewer("Cloud Viewer");

    //blocks until the cloud is actually rendered
    viewer.showCloud(cloud);

    //use the following functions to get access to the underlying more advanced/powerful
    //PCLVisualizer

    //This will only get called once
    viewer.runOnVisualizationThreadOnce(viewerOneOff);

    //This will get called once per visualization iteration
    viewer.runOnVisualizationThread(viewerPsycho);

    while (!viewer.wasStopped())
    {
        //you can also do cool processing here
        //FIXME: Note that this is running in a separate thread from viewerPsycho
        //and you should guard against race conditions yourself...
        user_data++;
    }
    return 0;
}
# set bin folder
set(CMAKE_BINARY_DIR ${CMAKE_SOURCE_DIR}/bin)
set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR})
set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR})
#include <cstdlib>
#include <vtkCylinderSource.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkProperty.h>
#include <vtkCamera.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkSmartPointer.h>

int main(int, char *[])
{
  // This creates a polygonal cylinder model with eight circumferential facets
  // (i.e., in practice an octagonal prism).
  vtkSmartPointer<vtkCylinderSource> cylinder =
      vtkSmartPointer<vtkCylinderSource>::New();
  cylinder->SetResolution(8);

  // The mapper is responsible for pushing the geometry into the graphics library.
  // It may also do color mapping, if scalars or other attributes are defined.
  vtkSmartPointer<vtkPolyDataMapper> cylinderMapper =
      vtkSmartPointer<vtkPolyDataMapper>::New();
  cylinderMapper->SetInputConnection(cylinder->GetOutputPort());

  // The actor is a grouping mechanism: besides the geometry (mapper), it
  // also has a property, transformation matrix, and/or texture map.
  // Here we set its color and rotate it around the X and Y axes.
  vtkSmartPointer<vtkActor> cylinderActor =
      vtkSmartPointer<vtkActor>::New();
  cylinderActor->SetMapper(cylinderMapper);
  cylinderActor->GetProperty()->SetColor(1.0000, 0.3882, 0.2784);
  cylinderActor->RotateX(30.0);
  cylinderActor->RotateY(-45.0);

  // The renderer generates the image
  // which is then displayed on the render window.
  // It can be thought of as a scene to which the actor is added
  vtkSmartPointer<vtkRenderer> renderer =
      vtkSmartPointer<vtkRenderer>::New();
  renderer->AddActor(cylinderActor);
  renderer->SetBackground(0.1, 0.2, 0.4);

  // Zoom in a little by accessing the camera and invoking its "Zoom" method.
  renderer->ResetCamera();
  renderer->GetActiveCamera()->Zoom(1.5);

  // The render window is the actual GUI window
  // that appears on the computer screen
  vtkSmartPointer<vtkRenderWindow> renderWindow =
      vtkSmartPointer<vtkRenderWindow>::New();
  renderWindow->SetSize(200, 200);
  renderWindow->AddRenderer(renderer);

  // The render window interactor captures mouse events
  // and will perform appropriate camera or actor manipulation
  // depending on the nature of the events.
  vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor =
      vtkSmartPointer<vtkRenderWindowInteractor>::New();
  renderWindowInteractor->SetRenderWindow(renderWindow);

  // This starts the event loop and as a side effect causes an initial render.
  renderWindowInteractor->Start();

  return EXIT_SUCCESS;
}
#==============================================================
# generate and link target for point_cloud_viewer
#==============================================================
set(TARGET_NAME aa_qt_main)
add_executable(${TARGET_NAME} ${SRC_LIST} ${ui_FILES} ${qrc_FILES})

# link qt libraries
qt5_use_modules(${TARGET_NAME} Core Widgets OpenGL Xml Gui Sql)

# link vtk and pcl libraries
target_link_libraries(${TARGET_NAME}
    ${CONFIG_TARGET}
    ${UTIL_TARGET}
    ${MODEL_TARGET}
    ${DETECTION_TARGET}
    ${DATABASE_TARGET}
)
If we reference a Qt library that uses QObject, then when we include its header in MainWindow.h we must also include mocs_compilation.cpp in MainWindow.cpp; otherwise an error will occur at link time, as sketched below.
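A minimal sketch of that workaround; mocs_compilation.cpp follows CMake's AUTOMOC output naming, and MainWindow is an illustrative file name:

// MainWindow.cpp (sketch)
#include "MainWindow.h"

// Pull the AUTOMOC-generated meta-object code into this translation unit so
// the QObject meta-object symbols resolve at link time.
#include "mocs_compilation.cpp"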
In addition, building graphical Qt applications requires OpenGL libraries and headers to be installed. On Ubuntu and other Debian-based Linux systems you can get OpenGL and the minimal set of development tools by installing the packages libgl1-mesa-dev and build-essential, i.e. by running this command:

sudo apt-get install build-essential libgl1-mesa-dev
# Find includes in corresponding build directories
set(CMAKE_INCLUDE_CURRENT_DIR ON)

# Instruct CMake to run moc automatically when needed.
set(CMAKE_AUTOMOC ON)
# ${QT_INCLUDE_DIRS}, ${QT_LIBRARIES} and so on are all Qt4 macro definitions!
# Find the QtWidgets library
find_package(Qt5Core)
find_package(Qt5Widgets)
find_package(Qt5Gui)
find_package(Qt5OpenGL)
find_package(Qt5Xml)
/sbin/ldconfig.real: /usr/lib/nvidia-384/libEGL.so.1 is not a symbolic link
/sbin/ldconfig.real: /usr/lib32/nvidia-384/libEGL.so.1 is not a symbolic link
Before you can use ROS, you will need to initialize rosdep. rosdep enables you to easily install system dependencies for source you want to compile and is required to run some core components in ROS.
sudo rosdep init
rosdep update
will output
reading in sources list data from /etc/ros/rosdep/sources.list.d
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/osx-homebrew.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/base.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/python.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/ruby.yaml
Hit https://raw.githubusercontent.com/ros/rosdistro/master/releases/fuerte.yaml
Query rosdistro index https://raw.githubusercontent.com/ros/rosdistro/master/index.yaml
Add distro "groovy"
Add distro "hydro"
Add distro "indigo"
Add distro "jade"
Add distro "kinetic"
Add distro "lunar"
updated cache in /home/kezunlin/.ros/rosdep/sources.cache
... logging to /home/kezunlin/.ros/log/b777db6c-ff85-11e8-93c2-80fa5b47928a/roslaunch-ke-17139.log
Checking log directory for disk usage. This may take awhile.
Press Ctrl-C to interrupt
Done checking log file disk usage. Usage is <1GB.
started roslaunch server http://ke:36319/
ros_comm version 1.12.14
SUMMARY
========
PARAMETERS
* /rosdistro: kinetic
* /rosversion: 1.12.14
NODES
auto-starting new master
process[master]: started with pid [17162]
ROS_MASTER_URI=http://ke:11311/
setting /run_id to b777db6c-ff85-11e8-93c2-80fa5b47928a
process[rosout-1]: started with pid [17175]
started core service [/rosout]
^C[rosout-1] killing on exit
[master] killing on exit
shutting down processing monitor...
... shutting down processing monitor complete
done
Create Workspace
Create
Let’s create and build a catkin workspace:
mkdir -p ~/catkin_ws/src
cd ~/catkin_ws/
catkin_make

source devel/setup.bash
echo $ROS_PACKAGE_PATH
To make sure your workspace is properly overlayed by the setup script, make sure the ROS_PACKAGE_PATH environment variable includes the directory you're in.
While Caffe is made for deep networks it can likewise represent “shallow” models like logistic regression for classification. We’ll do simple logistic regression on synthetic data that we’ll generate and save to HDF5 to feed vectors to Caffe. Once that model is done, we’ll add layers to improve accuracy. That’s what Caffe is about: define a model, experiment, and then deploy.
import os
import h5py
import numpy as np

# Write out the data to HDF5 files in a temp directory.
# This file is assumed to be caffe_root/examples/hdf5_classification.ipynb
# (X, y and Xt, yt are the train/test arrays produced by the earlier,
# elided data-generation step)
dirname = os.path.abspath('./examples/hdf5_classification/data')
if not os.path.exists(dirname):
    os.makedirs(dirname)

train_filename = os.path.join(dirname, 'train.h5')
test_filename = os.path.join(dirname, 'test.h5')
# HDF5DataLayer source should be a file containing a list of HDF5 filenames.
# To show this off, we'll list the same data file twice.
with h5py.File(train_filename, 'w') as f:
    f['data'] = X
    f['label'] = y.astype(np.float32)
with open(os.path.join(dirname, 'train.txt'), 'w') as f:
    f.write(train_filename + '\n')
    f.write(train_filename + '\n')

# HDF5 is pretty efficient, but can be further compressed.
comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
with h5py.File(test_filename, 'w') as f:
    f.create_dataset('data', data=Xt, **comp_kwargs)
    f.create_dataset('label', data=yt.astype(np.float32), **comp_kwargs)
with open(os.path.join(dirname, 'test.txt'), 'w') as f:
    f.write(test_filename + '\n')
Let’s define logistic regression in Caffe through Python net specification. This is a quick and natural way to define nets that sidesteps manually editing the protobuf model.
import caffe
from caffe import layers as L
from caffe import params as P

def logreg(hdf5, batch_size):
    # logistic regression: data, matrix multiplication, and 2-class softmax loss
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.ip1 = L.InnerProduct(n.data, num_output=2, weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.ip1, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip1, n.label)
    return n.to_proto()

train_net_path = 'examples/hdf5_classification/logreg_auto_train.prototxt'
with open(train_net_path, 'w') as f:
    f.write(str(logreg('examples/hdf5_classification/data/train.txt', 10)))

test_net_path = 'examples/hdf5_classification/logreg_auto_test.prototxt'
with open(test_net_path, 'w') as f:
    f.write(str(logreg('examples/hdf5_classification/data/test.txt', 10)))
Now, we’ll define our “solver” which trains the network by specifying the locations of the train and test nets we defined above, as well as setting values for various parameters used for learning, display, and “snapshotting”.
from caffe.proto import caffe_pb2

def solver(train_net_path, test_net_path):
    s = caffe_pb2.SolverParameter()

    # Specify locations of the train and test networks.
    s.train_net = train_net_path
    s.test_net.append(test_net_path)

    s.test_interval = 1000  # Test after every 1000 training iterations.
    s.test_iter.append(250) # Test 250 "batches" each time we test.

    s.max_iter = 10000      # # of times to update the net (training iterations)

    # Set the initial learning rate for stochastic gradient descent (SGD).
    s.base_lr = 0.01

    # Set `lr_policy` to define how the learning rate changes during training.
    # Here, we 'step' the learning rate by multiplying it by a factor `gamma`
    # every `stepsize` iterations.
    s.lr_policy = 'step'
    s.gamma = 0.1
    s.stepsize = 5000

    # Set other optimization parameters. Setting a non-zero `momentum` takes a
    # weighted average of the current gradient and previous gradients to make
    # learning more stable. L2 weight decay regularizes learning, to help prevent
    # the model from overfitting.
    s.momentum = 0.9
    s.weight_decay = 5e-4

    # Display the current training loss and accuracy every 1000 iterations.
    s.display = 1000

    # Snapshots are files used to store networks we've trained. Here, we'll
    # snapshot every 10K iterations -- just once at the end of training.
    # For larger networks that take longer to train, you may want to set
    # snapshot < max_iter to save the network and training state to disk during
    # optimization, preventing disaster in case of machine crashes, etc.
    s.snapshot = 10000
    s.snapshot_prefix = 'examples/hdf5_classification/data/train'

    # We'll train on the CPU for fair benchmarking against scikit-learn.
    # Changing to GPU should result in much faster training!
    s.solver_mode = caffe_pb2.SolverParameter.CPU

    return s

solver_path = 'examples/hdf5_classification/logreg_solver.prototxt'
with open(solver_path, 'w') as f:
    f.write(str(solver(train_net_path, test_net_path)))
Time to learn and evaluate our Caffeinated logistic regression in Python.
I0224 00:32:03.232779 655 caffe.cpp:178] Use CPU.
I0224 00:32:03.391911 655 solver.cpp:48] Initializing solver from parameters:
train_net: "examples/hdf5_classification/logreg_auto_train.prototxt"
test_net: "examples/hdf5_classification/logreg_auto_test.prototxt"
......
I0224 00:32:04.087514 655 solver.cpp:406] Test net output #0: accuracy = 0.77
I0224 00:32:04.087532 655 solver.cpp:406] Test net output #1: loss = 0.593815 (* 1 = 0.593815 loss)
I0224 00:32:04.087541 655 solver.cpp:323] Optimization Done.
I0224 00:32:04.087548 655 caffe.cpp:222] Optimization Done.
If you look at output or the logreg_auto_train.prototxt, you’ll see that the model is simple logistic regression. We can make it a little more advanced by introducing a non-linearity between weights that take the input and weights that give the output – now we have a two-layer network. That network is given in nonlinear_auto_train.prototxt, and that’s the only change made in nonlinear_logreg_solver.prototxt which we will now use.
The final accuracy of the new network should be higher than logistic regression!
from caffe import layers as L
from caffe import params as P

def nonlinear_net(hdf5, batch_size):
    # one small nonlinearity, one leap for model kind
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # define a hidden layer of dimension 40
    n.ip1 = L.InnerProduct(n.data, num_output=40, weight_filler=dict(type='xavier'))
    # transform the output through the ReLU (rectified linear) non-linearity
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    # score the (now non-linear) features
    n.ip2 = L.InnerProduct(n.ip1, num_output=2, weight_filler=dict(type='xavier'))
    # same accuracy and loss as before
    n.accuracy = L.Accuracy(n.ip2, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()

train_net_path = 'examples/hdf5_classification/nonlinear_auto_train.prototxt'
with open(train_net_path, 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/train.txt', 10)))

test_net_path = 'examples/hdf5_classification/nonlinear_auto_test.prototxt'
with open(test_net_path, 'w') as f:
    f.write(str(nonlinear_net('examples/hdf5_classification/data/test.txt', 10)))

solver_path = 'examples/hdf5_classification/nonlinear_logreg_solver.prototxt'
with open(solver_path, 'w') as f:
    f.write(str(solver(train_net_path, test_net_path)))
GitHub recommends using Jekyll to manage static pages, but Jekyll is based on Ruby and is difficult to install and configure, so we use Hexo instead. Hexo is a static blog framework similar to Jekyll, based on Node.js and easier to use.
use Github to create repo
create a new repo in GitHub, named username.github.io: kezunlin.github.io

In Settings | GitHub Pages, choose a theme and deploy.
# download and compile
wget https://nodejs.org/dist/v8.9.3/node-v8.9.3.tar.gz
tar xzvf node-v8.9.3.tar.gz
cd node-v8.9.3
./configure
make -j8
sudo make install
# link to /usr/bin
sudo ln -s /usr/local/bin/node /usr/bin/node
sudo ln -s /usr/local/bin/npm /usr/bin/npm
hexo new "postName"#new post hexo new page "pageName"#new page hexo generate #generate static files to public/ hexo server #start server on localhost:4000 hexo deploy #push .deploy_git/ to GitHub hexo clean #clean files
Hexo short commands:
hexo n == hexo new
hexo g == hexo generate
hexo s == hexo server
hexo d == hexo deploy
Hexo composite commands:
hexo server -g
hexo deploy -g
front-matter
---
title: Using Github Pages and Hexo to manage personal blogs
date: 2017-12-26 17:28:10
categories: tutorial
tags:
  - github pages
  - hexo
  - nodejs
  - npm
---
local_search:
  enable: true # create a new 'Search' button next to 'Archives'
  # if auto, trigger search by changing input
  # if manual, trigger search by pressing enter key or search button
  trigger: auto
  # show top n results per article, show all results by setting to -1
  top_n_per_article: 1
busuanzi_count:
  # count values only if the other configs are false
  enable: true
  # custom uv span for the whole site
  site_uv: true
  site_uv_header: <i class="fa fa-user"></i>
  site_uv_footer: Visitors
  # custom pv span for the whole site
  site_pv: true
  site_pv_header: <i class="fa fa-eye"></i>
  site_pv_footer: Total Visits
  # custom pv span for one page only
  page_pv: true
  page_pv_header: <i class="fa fa-eye"></i>
  page_pv_footer: Reads
gitment for comment (not)
We can use a GitHub repo to store the blog site's comments in issues.

Create a new repo named gitment in GitHub for storing comments in issues.
edit blog\themes\next\_config.yml
gitment:
  enable: true
  mint: true # RECOMMEND, A mint on Gitment, to support count, language and proxy_gateway
  count: true # Show comments count in post meta area
  lazy: true # Comments lazy loading with a button
  cleanly: false # Hide 'Powered by ...' on footer, and more
  language: zh-Hans # Force language, or auto switch by theme
  github_user: kezunlin # MUST HAVE, Your Github ID
  github_repo: gitment # MUST HAVE, The repo you use to store Gitment comments
  client_id: xxx # MUST HAVE, Github client id for the Gitment
  client_secret: yyy # EITHER this or proxy_gateway, Github access secret token for the Gitment
  proxy_gateway: # Address of api proxy, See: https://github.com/aimingoo/intersect
  redirect_protocol: # Protocol of redirect_uri with force_redirect_protocol when mint enabled
> hexo server
(node:7563) [DEP0061] DeprecationWarning: fs.SyncWriteStream is deprecated.
INFO  Start processing
FATAL Something's wrong. Maybe you can find the solution here: http://hexo.io/docs/troubleshooting.html
Error: watch /media/kezunlin/Workspace/git/blog/source/_posts ENOSPC
solution:
echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
ERROR Local hexo not found in
hexo -v
ERROR Local hexo not found in /home/kezunlin/git/blog