本文整理汇总了C++中image_geometry::PinholeCameraModel类的典型用法代码示例。如果您正苦于以下问题:C++ PinholeCameraModel类的具体用法?C++ PinholeCameraModel怎么用?C++ PinholeCameraModel使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PinholeCameraModel类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: getCameraRay
// Convert a raw (distorted) image pixel into a 3D viewing ray in the camera
// frame: rectify the pixel first, then back-project it through the model.
void getCameraRay(const image_geometry::PinholeCameraModel& model, const cv::Point2d& pt, cv::Point3d* ray)
{
  const cv::Point2d rectified = model.rectifyPoint(pt);
  ROS_DEBUG("Rect Point: %f, %f",rectified.x,rectified.y);
  *ray = model.projectPixelTo3dRay(rectified);
}
开发者ID:contradict,项目名称:SampleReturn,代码行数:7,代码来源:camera_ray.cpp
示例2: cloud
// Build an organized XYZ point cloud from a 32-bit float depth image using the
// pinhole intrinsics; T supplies the sensor pose recorded in the cloud header.
// Invalid depths (<= 0 or NaN) become NaN points, so the cloud is not dense.
pcl::PointCloud<pcl::PointXYZ>::Ptr Conversions::toPointCloud(const Eigen::Isometry3d &T, const image_geometry::PinholeCameraModel &cam, const cv::Mat &depth_img)
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
  cloud->header.frame_id = "cam";
  cloud->is_dense = false;
  cloud->height = depth_img.rows;
  cloud->width = depth_img.cols;
  const Eigen::Vector3d t = T.translation();
  cloud->sensor_origin_ = Eigen::Vector4f(t(0), t(1), t(2), 1.f);
  cloud->sensor_orientation_ = Eigen::Quaternionf(T.rotation().cast<float>());
  cloud->points.resize(cloud->height * cloud->width);
  const float invalid = std::numeric_limits<float>::quiet_NaN();
  // assumes depth_img holds packed 32-bit floats (CV_32FC1) — TODO confirm
  const float* depthdata = reinterpret_cast<float*>(&depth_img.data[0]);
  size_t idx = 0;
  for (int v = 0; v < depth_img.rows; ++v) {
    for (int u = 0; u < depth_img.cols; ++u, ++idx) {
      const float d = depthdata[idx];
      pcl::PointXYZ& p = cloud->points[idx];
      if (d > 0 && !isnan(d)) {
        // Pinhole back-projection: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy.
        p.z = d;
        p.x = (u - cam.cx()) * d / cam.fx();
        p.y = (v - cam.cy()) * d / cam.fy();
      } else {
        p.x = invalid;
        p.y = invalid;
        p.z = invalid;
      }
    }
  }
  return cloud;
}
开发者ID:einsnull,项目名称:uzliti_slam,代码行数:32,代码来源:conversions.cpp
示例3: generateStartPoints
// Seed a grid of candidate points on the estimated plane around the 3D point
// obtained by intersecting the viewing ray of point_2d with the plane.
// Emits the candidates both in 3D and re-projected to pixel coordinates.
void FindObjectOnPlane::generateStartPoints(
  const cv::Point2f& point_2d,
  const image_geometry::PinholeCameraModel& model,
  const pcl::ModelCoefficients::Ptr& coefficients,
  std::vector<cv::Point3f>& search_points_3d,
  std::vector<cv::Point2f>& search_points_2d)
{
  NODELET_INFO("generateStartPoints");
  jsk_recognition_utils::Plane::Ptr plane
    (new jsk_recognition_utils::Plane(coefficients->values));
  cv::Point3d ray = model.projectPixelTo3dRay(point_2d);
  Eigen::Vector3f center = rayPlaneInteersect(ray, plane);
  const double step = 0.01;   // grid spacing
  const int half_extent = 5;  // half-width of the grid in cells
  search_points_3d.clear();
  search_points_2d.clear();
  for (int ix = -half_extent; ix < half_extent; ++ix) {
    for (int iy = -half_extent; iy < half_extent; ++iy) {
      // Offset within the plane's local x/y, then snap back onto the plane.
      Eigen::Vector3f shifted = center + Eigen::Vector3f(step * ix, step * iy, 0);
      Eigen::Vector3f candidate;
      plane->project(shifted, candidate);
      cv::Point3f candidate_cv(candidate[0], candidate[1], candidate[2]);
      search_points_3d.push_back(candidate_cv);
      search_points_2d.push_back(model.project3dToPixel(candidate_cv));
    }
  }
}
开发者ID:YoheiKakiuchi,项目名称:jsk_recognition,代码行数:32,代码来源:find_object_on_plane_nodelet.cpp
示例4: projectPoints
// Project a single 3D point (camera frame) onto the image plane.
void projectPoints(const image_geometry::PinholeCameraModel &cam_model,
                   const cv::Point3d &points3D,
                   cv::Point2d *points2D)
{
  const cv::Point2d uv = cam_model.project3dToPixel(points3D);
  // Rectification of the projected pixel is intentionally disabled:
  // *points2D = cam_model.rectifyPoint(*points2D);
  *points2D = uv;
}
开发者ID:pablospe,项目名称:calibration,代码行数:7,代码来源:projection.cpp
示例5: PointFromPixel
// Intersect the camera ray through `pixel` with the world ground plane (z = 0)
// and return the intersection as a PCL point with z forced to zero.
pcl::PointXYZ PointFromPixel(const cv::Point& pixel, const tf::Transform& cameraFrameToWorldFrame, image_geometry::PinholeCameraModel cam) {
  const cv::Point3d ray = cam.projectPixelTo3dRay(pixel);
  const tf::Point origin = cameraFrameToWorldFrame * tf::Vector3(0, 0, 0);
  const tf::Point direction = cameraFrameToWorldFrame * tf::Vector3(ray.x, ray.y, ray.z) - origin;
  // Scale so that origin + scale * direction lies on the z = 0 plane.
  const double scale = -origin.z() / direction.z();
  const tf::Point hit = origin + scale * direction;
  return pcl::PointXYZ(hit.x(), hit.y(), 0);
}
开发者ID:JasonGibson274,项目名称:igvc-software,代码行数:8,代码来源:CVUtils.hpp
示例6: rayPlaneInteersect
// Given the two pixel endpoints of an object's x-axis, compute the pixel where
// the unit y-axis ends: both pixels are lifted onto `plane` via their viewing
// rays, the in-plane y direction is normal x ux, and the resulting 3D point is
// projected back to the image.
cv::Point2d FindObjectOnPlane::getUyEnd(
  const cv::Point2d& ux_start,
  const cv::Point2d& ux_end,
  const image_geometry::PinholeCameraModel& model,
  const jsk_recognition_utils::Plane::Ptr& plane)
{
  cv::Point3d ray_a = model.projectPixelTo3dRay(ux_start);
  cv::Point3d ray_b = model.projectPixelTo3dRay(ux_end);
  Eigen::Vector3f start_3d = rayPlaneInteersect(ray_a, plane);
  Eigen::Vector3f end_3d = rayPlaneInteersect(ray_b, plane);
  // Unit y direction: plane normal crossed with the in-plane x direction.
  Eigen::Vector3f uy_dir = plane->getNormal().cross(end_3d - start_3d).normalized();
  Eigen::Vector3f uy_end_3d = start_3d + uy_dir;
  return model.project3dToPixel(cv::Point3d(uy_end_3d[0],
                                            uy_end_3d[1],
                                            uy_end_3d[2]));
}
开发者ID:YoheiKakiuchi,项目名称:jsk_recognition,代码行数:19,代码来源:find_object_on_plane_nodelet.cpp
示例7: round
// Back-project an image-space point p — (p.x, p.y) pixel coordinates,
// p.z depth — into a 3D point mapped through camera_transform.
// Pixel coordinates are rounded and clamped to the image bounds from the
// camera info; if the depth is zero or NaN, the sentinel point
// (-1, -1, +infinity) is returned instead.
cv::Point3d GraphGridMapper::to3D(cv::Point3d &p, const Eigen::Isometry3d &camera_transform, const image_geometry::PinholeCameraModel &camera_model)
{
// Image dimensions used for clamping.
int width = camera_model.cameraInfo().width;
int height = camera_model.cameraInfo().height;
// Clamp the column index into [0, width - 1].
int u = round(p.x);
if(u < 0) {
u = 0;
} else if (u >= width) {
u = width -1;
}
// Clamp the row index into [0, height - 1].
int v = round(p.y);
if(v < 0) {
v = 0;
} else if (v >= height) {
v = height - 1;
}
// Sentinel returned when the depth is unusable.
cv::Point3d p3D(-1.0,-1.0,std::numeric_limits<double>::infinity());
if (p.z != 0 && !isnan(p.z))
{
// Pinhole back-projection: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy.
p3D.x = (u - camera_model.cx()) * p.z / camera_model.fx();
p3D.y = (v - camera_model.cy()) * p.z / camera_model.fy();
p3D.z = p.z;
// Map the camera-frame point into the transform's target frame.
// NOTE(review): relies on Eigen accepting transform * homogeneous-vector
// and converting the result back into a Vector3d; for an Isometry3d,
// 'camera_transform * vec' expresses the same intent directly — confirm.
Eigen::Vector3d vec(p3D.x, p3D.y, p3D.z);
vec = camera_transform * vec.homogeneous();
p3D.x = vec(0);
p3D.y = vec(1);
p3D.z = vec(2);
}
return p3D;
}
开发者ID:einsnull,项目名称:uzliti_slam,代码行数:34,代码来源:graph_grid_mapper.cpp
示例8: project_uv_to_cloud_index
// Find the index of the cloud point closest to the viewing ray through
// `image_point`, assuming the cloud is expressed in the camera frame
// (camera at the origin, looking along the optical axis).
// `distance` receives the distance reported by the ray search helper.
// NOTE: camera_model is now taken by const reference — the previous
// pass-by-value copied the whole model on every call for no benefit
// (call sites are unaffected).
size_t project_uv_to_cloud_index(const pcl::PointCloud<PointT>& cloud, const cv::Point2d& image_point,
                                 const image_geometry::PinholeCameraModel& camera_model, double& distance)
{
  // Back-project the pixel into a 3D ray direction through the origin.
  const cv::Point3d ray = camera_model.projectPixelTo3dRay(image_point);
  const Eigen::Vector3f direction_eig(ray.x, ray.y, ray.z);
  const Eigen::Vector3f origin_eig(0.0, 0.0, 0.0);
  // Delegate the closest-point-to-ray search (OpenMP-parallel helper).
  return closest_point_index_rayOMP(cloud, direction_eig, origin_eig, distance);
}
开发者ID:DSsoto,项目名称:Sub8,代码行数:13,代码来源:geometry.hpp
示例9: hit_same_point
// Occlusion filter: project every cloud point farther than 1 m into the image
// plane, bucket the points that land on the same pixel, and for each pixel
// whose depth spread (max z - min z) exceeds z_threshold emit the nearest
// point.  Output therefore contains foreground points that occlude geometry
// further away.
// NOTE(review): the bucket grid is indexed hit[x][y] with x in [0, cols) and
// y in [0, rows); 'between' is an external helper — confirm its bound
// semantics (open vs. closed) match the grid size.
inline void
hit_same_point( const image_geometry::PinholeCameraModel &camera_model,
const pcl::PointCloud<PointT> &in,
pcl::PointCloud<PointT> &out,
int rows,
int cols,
float z_threshold = 0.3)
{
// One bucket of candidate points per image pixel (cols x rows).
std::vector<std::vector <std::vector<PointT> > > hit( cols, std::vector< std::vector<PointT> >(rows, std::vector<PointT>()));
// Project points into image space
for(unsigned int i=0; i < in.points.size(); ++i){
const PointT* pt = &in.points.at(i);
if( pt->z > 1) { // min distance from camera 1m
cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));
if( between<int>(0, point_image.x, cols )
&& between<int>( 0, point_image.y, rows )
)
{
// Sort all points into image
{
hit[point_image.x][point_image.y].push_back(in.points.at(i));
}
}
}
}
// Caller must supply an empty output cloud; points are appended below.
assert(out.empty());
for(int x = 0; x < hit.size(); ++x ){
for(int y = 0; y < hit[0].size(); ++y){
// Only pixels hit by at least two points can exhibit a depth conflict.
if(hit[x][y].size()>1){
PointT min_z = hit[x][y][0];
float max_z = min_z.z;
for(int p = 1; p < hit[x][y].size(); ++p){
// find min and max z
max_z = MAX(max_z, hit[x][y][p].z);
#ifdef DEBUG
std::cout << hit[x][y].size() << "\t";
#endif
if(hit[x][y][p].z < min_z.z)
{
min_z = hit[x][y][p];
}
}
#ifdef DEBUG
std::cout << min_z.z << "\t" << max_z << "\t";
#endif
// Large depth spread on one pixel => nearest point occludes a farther one.
if(max_z - min_z.z > z_threshold){
#ifdef DEBUG
std::cout << min_z << std::endl;
#endif
out.push_back(min_z);
}
}
}
}
#ifdef DEBUG
std::cout << "hit_same_point in: "<< in.size() << "\t out: " << out.size() << "\n";
#endif
}
开发者ID:droter,项目名称:ros-image_cloud,代码行数:63,代码来源:hit_same_point.hpp
示例10: PointXYZtoCameraPointXY
// Project a 3D point (camera frame) to pixel coordinates with the pinhole
// intrinsics: u = fx * X / Z + cx, v = fy * Y / Z + cy.
void PointXYZtoCameraPointXY(const geometry_msgs::Point input, geometry_msgs::Point &output, const image_geometry::PinholeCameraModel& model) {
  const double u = input.x*model.fx()/input.z + model.cx();
  const double v = input.y*model.fy()/input.z + model.cy();
  output.x = u;
  output.y = v;
}
开发者ID:aminabedi1368,项目名称:Sepanta3,代码行数:4,代码来源:image_conversions.cpp
示例11: filter_depth_projection
// Depth-edge filter: project the cloud into the image plane keeping at most
// one point per pixel, then examine each kept point's 2D neighborhood with a
// k-d tree and copy to `out` only the points that is_edge_threshold()
// classifies as depth edges.  Each output point's intensity is set to the
// square root of the edge metric.
// NOTE(review): radiusSearch's second argument is a radius, yet k_neighbors
// (an integer count, default 2) is passed and the nearestKSearch call is
// commented out — confirm this substitution is intentional.
inline void
filter_depth_projection( const image_geometry::PinholeCameraModel &camera_model,
const pcl::PointCloud<PointT> &in,
pcl::PointCloud<PointT> &out,
int rows,
int cols,
int k_neighbors = 2,
float threshold = 0.3)
{
// Per-pixel occupancy grid (cols x rows), plus the 2D projections and the
// indices (into z_filtred) of the points that claimed a pixel first.
std::vector<std::vector<bool> > hit( cols, std::vector<bool>(rows));
std::vector<int> points2d_indices;
pcl::PointCloud<pcl::PointXY> points2d;
pcl::PointCloud<PointT> z_filtred;
#ifdef DEBUG
std::cout << "in points: "<< in.size() << " width: " << cols << " height: " << rows << "\n";
#endif
// Pre-filter with the project2d helper (keeps nearest points per cell).
project2d::Points2d<PointT> point_map;
point_map.init(camera_model, in, rows, cols);
point_map.get_points(z_filtred, 25);
// Project points into image space
for(unsigned int i=0; i < z_filtred.size(); ++i){
const PointT* pt = &z_filtred.points.at(i);
//if( pt->z > 1)
{ // min distance from camera 1m
cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));
if( between<int>(0, point_image.x, cols )
&& between<int>( 0, point_image.y, rows )
)
{
// Point allready at this position?
if(!hit[point_image.x][point_image.y]){
hit[point_image.x][point_image.y] = true;
pcl::PointXY p_image;
p_image.x = point_image.x;
p_image.y = point_image.y;
points2d.push_back(p_image);
points2d_indices.push_back(i);
}
else{
#ifdef DEBUG
std::cout << "[" << point_image.x << "][" << point_image.y << "] already inserted " << pt << "\n";
#endif
}
}
}
}
#ifdef DEBUG
std::cout << "Z_filtred: " << z_filtred.size() << " projected 2d points: "<< points2d.size() << " indices: " << points2d_indices.size() << "\n";
#endif
// Neighborhood search in 2D pixel space.
pcl::search::KdTree<pcl::PointXY> tree_n;
tree_n.setInputCloud(points2d.makeShared());
tree_n.setEpsilon(0.5);
for(unsigned int i=0; i < points2d.size(); ++i){
std::vector<int> k_indices;
std::vector<float> square_distance;
//tree_n.nearestKSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
tree_n.radiusSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
// Map neighbor indices from the 2D cloud back to z_filtred indices.
look_up_indices(points2d_indices, k_indices);
float distance_value;
if(is_edge_threshold(z_filtred, points2d_indices.at(i), k_indices, square_distance, distance_value, threshold)){
out.push_back(z_filtred.points.at(points2d_indices.at(i)));
out.at(out.size()-1).intensity = sqrt(distance_value);
}
}
#ifdef DEBUG
std::cout << "out 2d points: "<< out.size() << "\n";
#endif
}
开发者ID:droter,项目名称:ros-image_cloud,代码行数:84,代码来源:filter_depth_projection.hpp
示例12: remove_cluster_2d
// Edge-based cluster pruning in image space: project all cloud points beyond
// 1 m into the image (excluding a `border`-pixel margin), search each
// projected point's 2D neighborhood with a k-d tree, and keep only points
// that is_edge_z() classifies as depth edges.  Each kept point's intensity
// stores the edginess value.  Points with no neighbors are dropped.
// NOTE(review): radiusSearch's second argument is a radius, yet k_neighbors
// (an integer count, default 4) is passed and the nearestKSearch call is
// commented out — confirm this substitution is intentional.
inline void
remove_cluster_2d( const image_geometry::PinholeCameraModel &camera_model,
const pcl::PointCloud<PointT> &in,
pcl::PointCloud<PointT> &out,
int rows,
int cols,
int k_neighbors = 4,
int border = 25)
{
// 2D projections and, in lockstep, the indices of their source points in `in`.
std::vector<int> points2d_indices;
pcl::PointCloud<pcl::PointXY> points2d;
#ifdef DEBUG
std::cout << "in points: "<< in.size() << "\n";
#endif
// Project points into image space
for(unsigned int i=0; i < in.points.size(); ++i){
const PointT* pt = &in.points.at(i);
if( pt->z > 1) { // min distance from camera 1m
cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));
if( between<int>(0+border, point_image.x, cols-border )
&& between<int>( 0+border, point_image.y, rows-border )
)
{
pcl::PointXY p_image;
p_image.x = point_image.x;
p_image.y = point_image.y;
points2d.push_back(p_image);
points2d_indices.push_back(i);
}
}
}
#ifdef DEBUG
std::cout << "projected 2d points: "<< points2d.size() << " indices: " << points2d_indices.size() << "\n";
#endif
// Neighborhood search in 2D pixel space.
pcl::search::KdTree<pcl::PointXY> tree_n;
tree_n.setInputCloud(points2d.makeShared());
tree_n.setEpsilon(0.1);
for(unsigned int i=0; i < points2d.size(); ++i){
std::vector<int> k_indices;
std::vector<float> square_distance;
//tree_n.nearestKSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
tree_n.radiusSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
if(k_indices.empty()) continue; // Dont add points without neighbors
// Map neighbor indices from the 2D cloud back to indices into `in`.
look_up_indices(points2d_indices, k_indices);
float edginess = 0;
if(is_edge_z(in, points2d_indices.at(i), k_indices, square_distance, edginess, 0.1)){
out.push_back(in.points.at(points2d_indices.at(i)));
out.at(out.size()-1).intensity = edginess;
}
}
#ifdef DEBUG
std::cout << "out 2d points: "<< out.size() << "\n";
#endif
}
开发者ID:droter,项目名称:ros-image_cloud,代码行数:67,代码来源:remove_cluster_2d.hpp
示例13: infoCallback
// One-shot CameraInfo handler: configure the camera model and the pattern
// detector's intrinsics, then start subscribing to mono images.  Subsequent
// messages are ignored once calibration data has been captured.
void infoCallback(const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  if (!calibrated)
  {
    cam_model_.fromCameraInfo(info_msg);
    pattern_detector_.setCameraMatrices(cam_model_.intrinsicMatrix(), cam_model_.distortionCoeffs());
    calibrated = true;
    image_sub_ = nh_.subscribe("/camera/rgb/image_mono", 1, &CalibrateKinectCheckerboard::imageCallback, this);
    ROS_INFO("[calibrate] Got image info!");
  }
}
开发者ID:chazmatazz,项目名称:clam,代码行数:12,代码来源:calibrate_kinect_checkerboard.cpp
示例14: camerainfoCb
// Capture the camera intrinsics and frame id from the first CameraInfo
// message, then unsubscribe — no further info messages are needed.
void camerainfoCb(const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  ROS_INFO("infocallback :shutting down camerainfosub");
  camera_topic = info_msg->header.frame_id;
  cam_model_.fromCameraInfo(info_msg);
  camerainfosub_.shutdown();
}
开发者ID:k-okada,项目名称:jsk_smart_apps,代码行数:7,代码来源:3dtopixel.cpp
示例15: overlayPoints
// Draw the calibration detector points on top of the image: each point is
// transformed by `transform`, projected through the stored camera model, and
// marked with a filled red circle plus its 1-based index label.
void overlayPoints(pcl::PointCloud<pcl::PointXYZ> detector_points, tf::Transform &transform, cv_bridge::CvImagePtr& image)
{
  // Overlay calibration points on the image
  pcl::PointCloud<pcl::PointXYZ> transformed_detector_points;
  pcl_ros::transformPointCloud(detector_points, transformed_detector_points, transform);

  const int font_face = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
  const double font_scale = 1;
  const int thickness = 2;
  const int radius = 5;

  for (unsigned int i=0; i < transformed_detector_points.size(); i++)
  {
    // Reference instead of copy — the point is only read.
    const pcl::PointXYZ& pt = transformed_detector_points[i];
    cv::Point3d pt_cv(pt.x, pt.y, pt.z);
    cv::Point2d uv = cam_model_.project3dToPixel(pt_cv);
    cv::circle(image->image, uv, radius, CV_RGB(255,0,0), -1);

    std::stringstream out;
    out << i+1;  // 1-based point label
    int baseline = 0;
    cv::Size text_size = cv::getTextSize(out.str(), font_face, font_scale, thickness, &baseline);
    // Center the label horizontally over the circle and place it just above.
    // cv::Point replaces the deprecated C-API cvPoint().
    cv::Point origin(uv.x - text_size.width / 2,
                     uv.y - radius - baseline/* - thickness*/);
    cv::putText(image->image, out.str(), origin, font_face, font_scale, CV_RGB(255,0,0), thickness);
  }
}
开发者ID:chazmatazz,项目名称:clam,代码行数:32,代码来源:calibrate_kinect_checkerboard.cpp
示例16: cam_info_cb
// Store camera intrinsics from a CameraInfo message and flag success so the
// tracker knows the model is usable; log an error if parsing fails.
void cam_info_cb(const sensor_msgs::CameraInfo::ConstPtr& msg)
{
  const bool parsed = cam_model_.fromCameraInfo(msg);
  if (!parsed)
  {
    ROS_ERROR("[bk_skeletal_tracker] Couldn't read camera info.");
    return;
  }
  got_cam_info_ = true;
  ROS_INFO("[bk_skeletal_tracker] Got RGB camera info.");
}
开发者ID:Aharobot,项目名称:bk-ros,代码行数:10,代码来源:main.cpp
示例17: doOverlay
// Project the aligned point cloud into the incoming camera image, alpha-blend
// the cloud's colors over the camera pixels, and republish the composite.
void doOverlay(const sensor_msgs::ImageConstPtr& img_msg,
               const sensor_msgs::CameraInfoConstPtr& info_msg) {
    // convert camera image into opencv
    cam_model.fromCameraInfo(info_msg);
    cv_bridge::CvImagePtr cv_img = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::RGB8);
    // Blend weight for the existing image pixel (cloud color is added on top).
    double alpha_mult;
    ros::param::param<double>("~alpha_mult", alpha_mult, 0.5);
    uint8_t r, g, b;
    if(aligned_pc) {
        // Fetch base_link -> camera-frame transform at the image timestamp.
        if(!tf_list->waitForTransform(img_msg->header.frame_id, "/base_link",
                                      img_msg->header.stamp, ros::Duration(3)))
            return;
        tf::StampedTransform transform;
        tf_list->lookupTransform(img_msg->header.frame_id, "/base_link",
                                 img_msg->header.stamp, transform);
        PCRGB::Ptr tf_pc(new PCRGB());
        pcl_ros::transformPointCloud<PRGB>(*aligned_pc, *tf_pc, transform);
        for(uint32_t i=0;i<tf_pc->size();i++) {
            cv::Point3d proj_pt_cv(tf_pc->points[i].x, tf_pc->points[i].y,
                                   tf_pc->points[i].z);
            cv::Point pt2d = cam_model.project3dToPixel(proj_pt_cv);
            extractRGB(tf_pc->points[i].rgb, r, g, b);
            // BUGFIX: the bounds check previously compared pt2d.x against
            // image.rows and pt2d.y against image.cols (swapped).  Since
            // at<Vec3b>(pt2d.y, pt2d.x) indexes (row, col), x must be bounded
            // by cols (width) and y by rows (height).
            if(pt2d.x >= 0 && pt2d.y >= 0 &&
               pt2d.x < cv_img->image.cols && pt2d.y < cv_img->image.rows) {
                double old_r = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[0];
                double old_g = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[1];
                double old_b = cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[2];
                cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[0] =
                    (uint8_t) min(alpha_mult*old_r+r, 255.0);
                cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[1] =
                    (uint8_t) min(alpha_mult*old_g+g, 255.0);
                cv_img->image.at<cv::Vec3b>(pt2d.y, pt2d.x)[2] =
                    (uint8_t) min(alpha_mult*old_b+b, 255.0);
            }
        }
    }
    overlay_pub.publish(cv_img->toImageMsg());
}
开发者ID:gt-ros-pkg,项目名称:hrl,代码行数:42,代码来源:head_alignment_confirm.cpp
示例18: convert
// Convert a depth image into the pre-allocated organized point cloud
// `cloud_msg` (its height/width must already match the image).
// T is the depth element type — the template header is declared outside this
// excerpt; DepthTraits<T> supplies unit conversion to meters and validity
// tests (e.g. uint16_t millimeters or float meters — TODO confirm).
void convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg)
{
// Use correct principal point from calibration
float center_x = cameraModel.cx();
float center_y = cameraModel.cy();
// Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
double unit_scaling = DepthTraits<T>::toMeters( T(1) );
float constant_x = unit_scaling / cameraModel.fx();
float constant_y = unit_scaling / cameraModel.fy();
float bad_point = std::numeric_limits<float>::quiet_NaN();
// Iterators over the x/y/z fields of the output PointCloud2 buffer.
sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");
// Walk the depth image row by row; `step` is in bytes, so divide by sizeof(T).
const T* depth_row = reinterpret_cast<const T*>(&depth_msg->data[0]);
int row_step = depth_msg->step / sizeof(T);
for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step)
{
for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z)
{
T depth = depth_row[u];
// Missing points denoted by NaNs
if (!DepthTraits<T>::valid(depth))
{
*iter_x = *iter_y = *iter_z = bad_point;
continue;
}
// Fill in XYZ
*iter_x = (u - center_x) * depth * constant_x;
*iter_y = (v - center_y) * depth * constant_y;
*iter_z = DepthTraits<T>::toMeters(depth);
}
}
}
开发者ID:brunogouveia,项目名称:hand_shake,代码行数:37,代码来源:Teste.cpp
示例19: objectSrv
// Service handler: transform the requested stamped 3D point into the camera
// frame and project it to pixel coordinates, returned in the response.
bool objectSrv(jsk_smart_gui::point2screenpoint::Request &req,
               jsk_smart_gui::point2screenpoint::Response &res)
{
  ROS_INFO("3dtopixel request:x=%lf,y=%lf,z=%lf",req.point.point.x,req.point.point.y,req.point.point.z);
  geometry_msgs::PointStamped transformed;
  tf_listener_.transformPoint(camera_topic, req.point, transformed);
  const cv::Point3d xyz(transformed.point.x,
                        transformed.point.y,
                        transformed.point.z);
  const cv::Point2d pixel = cam_model_.project3dToPixel(xyz);
  res.x = pixel.x;
  res.y = pixel.y;
  return true;
}
开发者ID:k-okada,项目名称:jsk_smart_apps,代码行数:16,代码来源:3dtopixel.cpp
示例20: infoCb
// Keep the pinhole camera model in sync with incoming CameraInfo messages.
// TODO: shut this subscriber down after the first message (#HACK noted by
// the original author).
void infoCb(const sensor_msgs::CameraInfo& msg)
{
  model.fromCameraInfo(msg);
}
开发者ID:ashokzg,项目名称:cpb,代码行数:5,代码来源:locator.cpp
注:本文中的image_geometry::PinholeCameraModel类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论