This article collects and summarizes typical usage examples of the C++ sample function. If you have been struggling with questions such as: what exactly does the sample function do in C++? how is sample called? what do real uses of sample look like? then the hand-picked code examples below may help.
The sections below present 20 code examples of the sample function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: while
void PanoramaTracker::run() {
while (isRunning() && m_scaled.size() <= MAX_TRACKER_FRAMES) {
QScopedPointer<QtCamGstSample> sample(m_input->sample());
if (!sample) {
continue;
}
if (!Tracker::isInitialized()) {
QSize size = QSize(sample->width(), sample->height());
m_width = size.width() > 720 ? size.width() / 8 : size.width() / 4;
m_height = size.width() > 720 ? size.height() / 8 : size.height() / 4;
m_inputSize = size;
// TODO: This should be 5.0 but we fail to stitch sometimes if we set it to 5
if (!Tracker::initialize(m_width, m_height, 2.0f)) {
emit error(Panorama::ErrorTrackerInit);
return;
}
}
// Now we can process the sample:
const guint8 *src = sample->data();
QScopedArrayPointer<guint8>
dst(new guint8[m_inputSize.width() * m_inputSize.height() * 3 / 2]);
enum libyuv::FourCC fmt;
switch (sample->format()) {
case GST_VIDEO_FORMAT_UYVY:
fmt = libyuv::FOURCC_UYVY;
break;
default:
qCritical() << "Unsupported color format";
emit error(Panorama::ErrorTrackerFormat);
return;
}
guint8 *y = dst.data(),
*u = y + m_inputSize.width() * m_inputSize.height(),
*v = u + m_inputSize.width()/2 * m_inputSize.height()/2;
if (ConvertToI420(src, sample->size(),
y, m_inputSize.width(),
u, m_inputSize.width() / 2,
v, m_inputSize.width() / 2,
0, 0,
m_inputSize.width(), m_inputSize.height(),
m_inputSize.width(), m_inputSize.height(),
libyuv::kRotate0, fmt) != 0) {
emit error(Panorama::ErrorTrackerConvert);
return;
}
QScopedArrayPointer<guint8> scaled(new guint8[m_width * m_height * 3 / 2]);
guint8 *ys = scaled.data(),
*us = ys + m_width * m_height,
*vs = us + m_width/2 * m_height/2;
// Now scale:
// No need for error checking because the function always returns 0
libyuv::I420Scale(y, m_inputSize.width(),
u, m_inputSize.width()/2,
v, m_inputSize.width()/2,
m_inputSize.width(), m_inputSize.height(),
ys, m_width,
us, m_width/2,
vs, m_width/2,
m_width, m_height,
libyuv::kFilterBilinear);
int err = addFrame(scaled.data());
if (err >= 0) {
m_scaled.push_back(scaled.take());
m_frames.push_back(dst.take());
emit frameCountChanged();
}
}
}
Developer ID: ballock, Project: cameraplus, Lines of code: 80, Source file: panoramatracker.cpp
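A note on the buffer arithmetic above: an I420 frame stores a full-resolution Y plane followed by quarter-resolution U and V planes, which is why the destination buffer is sized width * height * 3 / 2 and why the u and v pointers are offset the way they are. A minimal sketch of that layout computation (a hypothetical helper for illustration, not part of cameraplus):

#include <cstdint>

// Plane pointers inside one contiguous I420 buffer of w*h*3/2 bytes:
// full-resolution Y, then quarter-resolution U and V.
struct I420Planes {
    uint8_t *y;
    uint8_t *u;
    uint8_t *v;
};

I420Planes i420Layout(uint8_t *buf, int w, int h) {
    I420Planes p;
    p.y = buf;                       // w * h luma bytes
    p.u = p.y + w * h;               // (w/2) * (h/2) chroma bytes
    p.v = p.u + (w / 2) * (h / 2);   // (w/2) * (h/2) chroma bytes
    return p;
}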
Example 2: sample_z
kernel void sample_z(global int *cur_y,
global int *cur_z,
global int *cur_r,
global int *z_by_ry,
global int *z_col_sum,
global int *obs,
global float *rand,
uint N, uint D, uint K, uint f_img_width,
float lambda, float epislon, float theta) {
const uint V_SCALE = 0, H_SCALE = 1, V_TRANS = 2, H_TRANS = 3, NUM_TRANS = 4;
uint h, w, new_index; // variables used in the for loop
uint nth = get_global_id(0); // n is the index of data
uint kth = get_global_id(1); // k is the index of features
uint f_img_height = D / f_img_width;
// calculate the prior probability of each cell is 1
float on_prob_temp = (z_col_sum[kth] - cur_z[nth * K + kth]) / (float)N;
float off_prob_temp = 1 - (z_col_sum[kth] - cur_z[nth * K + kth]) / (float)N;
// retrieve the transformation applied to this feature by this object
int v_scale = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + V_SCALE];
int h_scale = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + H_SCALE];
int v_dist = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + V_TRANS];
int h_dist = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + H_TRANS];
int new_height = f_img_height + v_scale, new_width = f_img_width + h_scale;
uint d, hh, ww;
// extremely hackish way to calculate the likelihood
for (d = 0; d < D; d++) {
// if the kth feature can turn on a pixel at d
if (cur_y[kth * D + d] == 1) {
// unpack d into h and w and get new index
h = d / f_img_width;
w = d % f_img_width;
for (hh = 0; hh < f_img_height; hh++) {
for (ww = 0; ww < f_img_width; ww++) {
if ((int)round((float)hh / new_height * f_img_height) == h &
(int)round((float)ww / new_width * f_img_width) == w) {
new_index = ((v_dist + hh) % f_img_height) * f_img_width + (h_dist + ww) % f_img_width;
// then the corresponding observed pixel is at new_index
// so, if the observed pixel at new_index is on
if (obs[nth * D + new_index] == 1) {
// if the nth object previously has the kth feature
if (cur_z[nth * K + kth] == 1) {
on_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index]) * (1 - epislon);
off_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index] - 1) * (1 - epislon);
} else {
on_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index] + 1) * (1 - epislon);
off_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index]) * (1 - epislon);
}
} else {
on_prob_temp *= 1 - lambda;
off_prob_temp *= 1.0f;
}
}
}
}
}
}
//printf("index: %d post_on: %f post_off: %f\n", nth * K + kth, on_prob_temp, off_prob_temp);
float post[2] = {on_prob_temp, off_prob_temp};
uint labels[2] = {1, 0};
pnormalize(post, 0, 2);
//printf("before index: %d %f %f %d \n", nth * K + kth, post[0], post[1], cur_z[nth * K + kth]);
cur_z[nth * K + kth] = sample(2, labels, post, 0, rand[nth * K + kth]);
//printf("after index: %d %f %f %d \n", nth * K + kth, post[0], post[1], cur_z[nth * K + kth]);
}
Developer ID: AusterweilLab, Project: MPBNP, Lines of code: 73, Source file: tibp_noisyor_cl.c
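The sample() call at the end of this kernel picks one of the two labels according to the normalized posterior and a pre-generated uniform random number. The helper itself is not shown on this page; a plausible sketch of such a categorical draw, written in C++ here for readability and with an assumed signature, is:

#include <cstddef>

// Pick labels[i] with probability probs[start + i], given a uniform draw u in [0, 1).
// Assumes the probs slice is already normalized (as pnormalize does in the kernel).
int sampleCategorical(std::size_t n, const int *labels, const float *probs,
                      std::size_t start, float u) {
    float cumulative = 0.0f;
    for (std::size_t i = 0; i < n; ++i) {
        cumulative += probs[start + i];
        if (u < cumulative)
            return labels[i];
    }
    return labels[n - 1]; // guard against rounding leaving u >= the summed probabilities
}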
Example 3: gui
void Mesh2Cloud::addProperties() {
auto group = gui()->properties()->add<Section>("Sampling", "group");
auto samples = group->add<Number>("Samples Per Square Unit", "samples");
samples->setDigits(0);
samples->setMin(1);
samples->setMax(100000);
samples->setValue(100);
group->add<Button>("Sample", "sample")->setCallback([&] () { auto ns = gui()->properties()->get<Number>({"group", "samples"})->value(); sample(ns); });
auto iogroup = gui()->properties()->add<Section>("Input/Output", "iogroup");
auto outFile = iogroup->add<File>("Save to: ", "outFile");
outFile->setMode(File::SAVE);
outFile->setCallback([&] (fs::path p) {
pcl::io::savePCDFileBinary(p.string(), *m_cloud);
gui()->log()->info("Saved pointcloud to: \""+p.string()+"\"");
});
outFile->disable();
}
Developer ID: paulhilbert, Project: visualizer, Lines of code: 20, Source file: Mesh2Cloud.cpp
Example 4: MOZ_ASSERT
nsresult
EMEH264Decoder::GmpInput(MP4Sample* aSample)
{
MOZ_ASSERT(IsOnGMPThread());
nsAutoPtr<MP4Sample> sample(aSample);
if (!mGMP) {
mCallback->Error();
return NS_ERROR_FAILURE;
}
if (sample->crypto.valid) {
CDMCaps::AutoLock caps(mProxy->Capabilites());
MOZ_ASSERT(caps.CanDecryptAndDecodeVideo());
const auto& keyid = sample->crypto.key;
if (!caps.IsKeyUsable(keyid)) {
nsRefPtr<nsIRunnable> task(new DeliverSample(this, sample.forget()));
caps.CallWhenKeyUsable(keyid, task, mGMPThread);
return NS_OK;
}
}
mLastStreamOffset = sample->byte_offset;
GMPVideoFrame* ftmp = nullptr;
GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
if (GMP_FAILED(err)) {
mCallback->Error();
return NS_ERROR_FAILURE;
}
gmp::GMPVideoEncodedFrameImpl* frame = static_cast<gmp::GMPVideoEncodedFrameImpl*>(ftmp);
err = frame->CreateEmptyFrame(sample->size);
if (GMP_FAILED(err)) {
mCallback->Error();
return NS_ERROR_FAILURE;
}
memcpy(frame->Buffer(), sample->data, frame->Size());
frame->SetEncodedWidth(mConfig.display_width);
frame->SetEncodedHeight(mConfig.display_height);
frame->SetTimeStamp(sample->composition_timestamp);
frame->SetCompleteFrame(true);
frame->SetDuration(sample->duration);
if (sample->crypto.valid) {
frame->InitCrypto(sample->crypto);
}
frame->SetFrameType(sample->is_sync_point ? kGMPKeyFrame : kGMPDeltaFrame);
frame->SetBufferType(GMP_BufferLength32);
nsTArray<uint8_t> info; // No codec specific per-frame info to pass.
nsresult rv = mGMP->Decode(frame, false, info, 0);
if (NS_FAILED(rv)) {
mCallback->Error();
return rv;
}
return NS_OK;
}
Developer ID: andrenatal, Project: gecko-dev, Lines of code: 61, Source file: EMEH264Decoder.cpp
Example 5: sample_hidden
// Sample a hidden neuron, given a visible neuron vector
double sample_hidden(rbm* r, unsigned int j, std::vector<int> visible) {
return sample(hidden_probability(r, j, visible));
}
Developer ID: daydreamt, Project: pgm, Lines of code: 4, Source file: rbm.cpp
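Here sample() appears to turn an activation probability into a binary state. A minimal Bernoulli draw with that shape (a sketch using the standard <random> header, not the rbm.cpp implementation) could be:

#include <random>

// Return 1.0 with the given probability, 0.0 otherwise.
double sampleBernoulli(double probability) {
    static std::mt19937 gen{std::random_device{}()};
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    return uniform(gen) < probability ? 1.0 : 0.0;
}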
Example 6: sample
void Shape::sample(const core::Vec3 &ps, float u1, float u2, float u3, int *primID, core::Vec3 *p, core::Vec3 *n) const
{
return sample(u1, u2, u3, primID, p, n);
}
Developer ID: paprikarender, Project: paprika, Lines of code: 4, Source file: shape.cpp
Example 7: clear
//......... code omitted here .........
return false;
}
//Load the info text
file >> word;
infoText = "";
while( word != "NumDimensions:" ){
infoText += word + " ";
file >> word;
}
//Get the number of dimensions in the training data
if( word != "NumDimensions:" ){
errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find NumDimensions header!" << std::endl;
file.close();
return false;
}
file >> numDimensions;
//Get the total number of training examples in the training data
file >> word;
if( word != "TotalNumTrainingExamples:" && word != "TotalNumExamples:" ){
errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find TotalNumTrainingExamples header!" << std::endl;
file.close();
return false;
}
file >> totalNumSamples;
//Get the total number of classes in the training data
file >> word;
if(word != "NumberOfClasses:"){
errorLog << "loadDatasetFromFile(string filename) - failed to find NumberOfClasses header!" << std::endl;
file.close();
return false;
}
file >> numClasses;
//Resize the class counter buffer and load the counters
classTracker.resize(numClasses);
//Get the total number of classes in the training data
file >> word;
if(word != "ClassIDsAndCounters:"){
errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find ClassIDsAndCounters header!" << std::endl;
file.close();
return false;
}
for(UINT i=0; i<classTracker.getSize(); i++){
file >> classTracker[i].classLabel;
file >> classTracker[i].counter;
file >> classTracker[i].className;
}
//Check if the dataset should be scaled using external ranges
file >> word;
if(word != "UseExternalRanges:"){
errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find UseExternalRanges header!" << std::endl;
file.close();
return false;
}
file >> useExternalRanges;
//If we are using external ranges then load them
if( useExternalRanges ){
externalRanges.resize(numDimensions);
for(UINT i=0; i<externalRanges.getSize(); i++){
file >> externalRanges[i].minValue;
file >> externalRanges[i].maxValue;
}
}
//Get the main training data
file >> word;
if( word != "LabelledTrainingData:" && word != "Data:"){
errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find LabelledTrainingData header!" << std::endl;
file.close();
return false;
}
ClassificationSample tempSample( numDimensions );
data.resize( totalNumSamples, tempSample );
for(UINT i=0; i<totalNumSamples; i++){
UINT classLabel = 0;
VectorFloat sample(numDimensions,0);
file >> classLabel;
for(UINT j=0; j<numDimensions; j++){
file >> sample[j];
}
data[i].set(classLabel, sample);
}
file.close();
//Sort the class labels
sortClassLabels();
return true;
}
Developer ID: sgrignard, Project: grt, Lines of code: 101, Source file: ClassificationData.cpp
Example 8: main
int main() {
srand(time(NULL));
sample(10000000, 1000000);
}
Developer ID: jefferyyuan, Project: wypractice, Lines of code: 4, Source file: r.c
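The r.c source of sample() is not shown on this page; judging by the call, it likely draws 1,000,000 items uniformly from a stream of 10,000,000. If that reading is right, a reservoir-sampling sketch with a similar purpose (an assumption about the interface, not the original code) would be:

#include <random>
#include <vector>

// Keep k indices drawn uniformly from a stream of n (classic reservoir sampling).
std::vector<int> sampleReservoir(int n, int k) {
    std::mt19937 gen(std::random_device{}());
    std::vector<int> reservoir(k);
    for (int i = 0; i < k; ++i)
        reservoir[i] = i;
    for (int i = k; i < n; ++i) {
        std::uniform_int_distribution<int> pick(0, i);
        int j = pick(gen);
        if (j < k)
            reservoir[j] = i;   // replace a kept index with probability k/(i+1)
    }
    return reservoir;
}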
Example 9: UniformSample
double UniformSample(double max) {
boost::uniform_real<> dist(0, max);
boost::variate_generator<boost::mt19937&, boost::uniform_real<> > sample(
gen, dist);
return sample();
}
Developer ID: willmlam, Project: aomdd, Lines of code: 6, Source file: utils.cpp
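The same uniform draw can be written without Boost using the standard <random> header; a roughly equivalent sketch, assuming a shared std::mt19937 engine in place of the gen object used above, is:

#include <random>

// Draw a double uniformly from [0, max).
double UniformSample(double max, std::mt19937 &gen) {
    std::uniform_real_distribution<double> dist(0.0, max);
    return dist(gen);
}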
Example 10: DeltaStarGibbs
//......... code omitted here .........
for (it = chol[i].begin(); it != chol[i].end(); it++) {
int j = it->first;
double value = it->second;
if (j == i)
diag = value;
else
sum += value * mean[j];
}
mean[i] = (u[i] - sum) / diag;
}
// print mean value
/*
{
char filename[120];
sprintf(filename,"mean.txt");
FILE *out = fopen(filename,"w");
int i;
for (i = 0; i < mean.size(); i++) {
fprintf(out,"%20.18e\n",mean[i]);
}
fclose(out);
}
*/
// finished printing
// generate a sample with zero mean, or compute the sample that should have been sampled
// cout << "start sampling" << endl;
vector<double> sample(chol.size(),0.0);
if (draw == 1) {
vector<double> z(chol.size(),0.0);
for (k = 0; k < z.size(); k++)
z[k] = ran.Norm01();
// print z
/*
{
char filename[120];
sprintf(filename,"z.txt");
FILE *out = fopen(filename,"w");
int i;
for (i = 0; i < z.size(); i++) {
fprintf(out,"%20.18e\n",z[i]);
}
fclose(out);
}
*/
// finished printing
for (i = 0; i < sample.size(); i++) {
double diag = 0.0;
double sum = 0.0;
map<int,double>::iterator it;
for (it = chol[i].begin(); it != chol[i].end(); it++) {
int j = it->first;
double value = it->second;
if (j == i)
diag = value;
Developer ID: kscharpf, Project: XDE, Lines of code: 67, Source file: Utility_v2.cpp
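The pattern in this (truncated) example is the standard one for sampling a Gaussian given a Cholesky-style factorization: draw z ~ N(0, I), then run a triangular solve so the result has the desired covariance, and add the mean obtained from the solves against u. A dense, hedged sketch of the zero-mean part, assuming the sparse chol maps represent a lower-triangular factor L of the precision matrix Q = L·Lᵀ (which may not match the original storage scheme), could look like:

#include <random>
#include <vector>

// Solve L^T s = z by back substitution; s then has covariance (L L^T)^{-1} = Q^{-1}.
std::vector<double> sampleZeroMean(const std::vector<std::vector<double>> &L,
                                   std::mt19937 &gen) {
    const int n = static_cast<int>(L.size());
    std::normal_distribution<double> norm01(0.0, 1.0);
    std::vector<double> z(n), s(n);
    for (int i = 0; i < n; ++i) z[i] = norm01(gen);
    for (int i = n - 1; i >= 0; --i) {           // back substitution on L^T
        double sum = 0.0;
        for (int j = i + 1; j < n; ++j) sum += L[j][i] * s[j];
        s[i] = (z[i] - sum) / L[i][i];
    }
    return s;
}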
Example 11: GaussianMean1DRegressionCompute
void GaussianMean1DRegressionCompute(const QUESO::BaseEnvironment& env,
double priorMean, double priorVar, const likelihoodData& dat)
{
// parameter space: 1-D on (-infinity, infinity)
QUESO::VectorSpace<P_V, P_M> paramSpace(
env, // queso environment
"param_", // name prefix
1, // dimensions
NULL); // names
P_V paramMin(paramSpace.zeroVector());
P_V paramMax(paramSpace.zeroVector());
paramMin[0] = -INFINITY;
paramMax[0] = INFINITY;
QUESO::BoxSubset<P_V, P_M> paramDomain(
"paramBox_", // name prefix
paramSpace, // vector space
paramMin, // min values
paramMax); // max values
// gaussian prior with user supplied mean and variance
P_V priorMeanVec(paramSpace.zeroVector());
P_V priorVarVec(paramSpace.zeroVector());
priorMeanVec[0] = priorMean;
priorVarVec[0] = priorVar;
QUESO::GaussianVectorRV<P_V, P_M> priorRv("prior_", paramDomain, priorMeanVec,
priorVarVec);
// likelihood is important
QUESO::GenericScalarFunction<P_V, P_M> likelihoodFunctionObj(
"like_", // name prefix
paramDomain, // image set
LikelihoodFunc<P_V, P_M>, // routine
(void *) &dat, // routine data ptr
true); // routineIsForLn
QUESO::GenericVectorRV<P_V, P_M> postRv(
"post_", // name prefix
paramSpace); // image set
// Initialize and solve the Inverse Problem with Bayes multi-level sampling
QUESO::StatisticalInverseProblem<P_V, P_M> invProb(
"", // name prefix
NULL, // alt options
priorRv, // prior RV
likelihoodFunctionObj, // likelihood fcn
postRv); // posterior RV
invProb.solveWithBayesMLSampling();
// compute mean and second moment of samples on each proc via Knuth online mean/variance algorithm
int N = invProb.postRv().realizer().subPeriod();
double subMean = 0.0;
double subM2 = 0.0;
double delta;
P_V sample(paramSpace.zeroVector());
for (int n = 1; n <= N; n++) {
invProb.postRv().realizer().realization(sample);
delta = sample[0] - subMean;
subMean += delta / n;
subM2 += delta * (sample[0] - subMean);
}
// gather all Ns, means, and M2s to proc 0
std::vector<int> unifiedNs(env.inter0Comm().NumProc());
std::vector<double> unifiedMeans(env.inter0Comm().NumProc());
std::vector<double> unifiedM2s(env.inter0Comm().NumProc());
MPI_Gather(&N, 1, MPI_INT, &(unifiedNs[0]), 1, MPI_INT, 0,
env.inter0Comm().Comm());
MPI_Gather(&subMean, 1, MPI_DOUBLE, &(unifiedMeans[0]), 1, MPI_DOUBLE, 0,
env.inter0Comm().Comm());
MPI_Gather(&subM2, 1, MPI_DOUBLE, &(unifiedM2s[0]), 1, MPI_DOUBLE, 0,
env.inter0Comm().Comm());
// get the total number of likelihood calls at proc 0
unsigned long totalLikelihoodCalls = 0;
MPI_Reduce(&likelihoodCalls, &totalLikelihoodCalls, 1, MPI_UNSIGNED_LONG,
MPI_SUM, 0, env.inter0Comm().Comm());
// compute global posterior mean and std via Chan algorithm, output results on proc 0
if (env.inter0Rank() == 0) {
int postN = unifiedNs[0];
double postMean = unifiedMeans[0];
double postVar = unifiedM2s[0];
for (unsigned int i = 1; i < unifiedNs.size(); i++) {
delta = unifiedMeans[i] - postMean;
postMean = (postN * postMean + unifiedNs[i] * unifiedMeans[i]) /
(postN + unifiedNs[i]);
postVar += unifiedM2s[i] + delta * delta *
(((double)postN * unifiedNs[i]) / (postN + unifiedNs[i]));
postN += unifiedNs[i];
}
postVar /= postN;
//compute exact answer - available in this case since the exact posterior is a gaussian
N = dat.dataSet.size();
double dataSum = 0.0;
for (int i = 0; i < N; i++)
dataSum += dat.dataSet[i];
//......... code omitted here .........
Developer ID: roystgnr, Project: queso, Lines of code: 101, Source file: test_GaussianMean1DRegression.C
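The per-process accumulation in this example is Welford's (Knuth's) online algorithm: each new sample updates the running mean and the sum of squared deviations M2 in a single pass. Pulled out on its own, a short sketch of the same update rule is:

// One-pass (Welford/Knuth) running mean and variance accumulator.
struct OnlineMoments {
    long long n = 0;
    double mean = 0.0;
    double m2 = 0.0;    // sum of squared deviations from the current mean

    void add(double x) {
        ++n;
        double delta = x - mean;
        mean += delta / n;
        m2 += delta * (x - mean);   // uses the updated mean, as in the example
    }
    double variance() const { return n > 0 ? m2 / n : 0.0; }  // population variance, matching postVar /= postN
};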
Example 12: sample
Point Geometry::sample(const Point&, const GeomSample &gs, Normal &normal) const {
return sample(gs, normal);
}
Developer ID: Twinklebear, Project: tray, Lines of code: 3, Source file: geometry.cpp
Example 13: main
int main (int argc, char **argv)
{
char c;
unsigned int flag = 0;
int interval = 1, count = 0, max_count = 1;
struct vg_data vg_now, vg_prev;
VMGuestLibError ret;
while ((c = getopt(argc, argv, "i:c:hvru")) != -1) {
switch(c) {
case 'i':
interval = atoi(optarg);
break;
case 'c':
max_count = atoi(optarg);
break;
case 'h':
usage();
return 0;
break;
case 'r': /* raw output */
flag |= FLAG_RAWOUTPUT;
break;
case 'v': /* verbose mode */
flag |= FLAG_VERBOSE;
break;
case 'u':
flag |= FLAG_UNIXTIME;
break;
default:
printf("Unknown option '%c'\n", c);
}
}
memset(&vg_now, 0x0, sizeof(struct vg_data));
ret = VMGuestLib_OpenHandle(&vg_now.handle);
if (ret != VMGUESTLIB_ERROR_SUCCESS) {
if (IS_VERBOSE(flag)) {
printf("VMGuestLib_OpenHandle: %d (%s)\n",
ret, VMGuestLib_GetErrorText(ret));
}
return 1;
}
if (sample(&vg_now, flag) != 0) {
goto bailout;
}
if (IS_RAWOUTPUT(flag)) {
printf("Timestamp "
"SessionId "
"HostProcessorSpeed "
"CpuReservationMHz CpuLimitMHz CpuShares "
"ElapsedMs CpuUsedMs CpuStolenMs "
"MemReservationMB MemLimitMB MemShares MemMappedMB "
"MemActiveMB MemOverheadMB MemBalloonedMB MemSwappedMB "
"MemSharedMB MemSharedSavedMB MemUsedMB\n"
);
} else {
printf("%-24s %-8s %-8s %8s %8s %8s %8s\n",
"Timestamp", "intvl(g)", "intvl(h)",
"used", "stolen", "%used", "%ready");
}
for (count = 0; count < max_count; count++) {
vg_prev = vg_now;
sleep(interval);
if (sample(&vg_now, flag) != 0) {
goto bailout;
}
output(&vg_now, &vg_prev, flag);
}
bailout:
ret = VMGuestLib_CloseHandle(vg_now.handle);
if (ret != VMGUESTLIB_ERROR_SUCCESS) {
if (IS_VERBOSE(flag)) {
printf("VMGuestLib_CloseHandle: %d (%s)\n",
ret, VMGuestLib_GetErrorText(ret));
}
return 1;
}
return 0;
}
Developer ID: thatsdone, Project: kusanagi, Lines of code: 90, Source file: vgstat.c
Example 14: main
int main ( int argc, char *argv[] )
{
//Variables for parsing the data file
std::string filename = "SPECT.train";
std::string line;
std::stringstream parse;
int ssize = 100; //establish a buffer size to store attribute values,
//which for binary classification string are no bigger than 1
char c[ssize];
char delimiter = ',';
//Variables to store the values in the data file
std::vector<int> tmpcase;
std::vector< std::vector<int> > training_set;
cv::Mat sample(0, 1, CV_32FC1);
cv::Mat labels(0, 1 , CV_16SC1);
cv::Mat train_set;
std::ifstream dataset_file(filename.c_str(), std::ios::in);
if(!dataset_file)
{
std::cerr << "Cannot load training set file" << std::endl;
}
else
{
while( getline(dataset_file, line) )
{
parse << line;
while( parse.getline(c,ssize,delimiter) )
{
tmpcase.push_back( (*c-'0') );
sample.push_back( (float)(*c-'0') );
}
parse.str(""); //safety measure to erase previous contents
parse.clear(); //clear flags to be able to read from it again
training_set.push_back(tmpcase);
tmpcase.clear();
train_set.push_back(sample.reshape(0,1));
labels.push_back((int)(sample.at<float>(0)));
sample = cv::Mat();
}
}
std::cout << train_set << std::endl;
cv::FileStorage fstore_traindata("spect_train.yml",cv::FileStorage::WRITE);
cv::Mat train_samples(train_set.colRange(1,train_set.cols));
fstore_traindata << "train_samples" << train_samples;
fstore_traindata << "train_labels" << labels;
fstore_traindata.release();
std::cout << train_samples << std::endl;
std::cout << labels << std::endl;
std::vector<int> tmp;
for(std::vector< std::vector<int> >::iterator it = training_set.begin(); it != training_set.end(); ++it)
{
tmp = *it;
for(std::vector<int>::iterator it2 = tmp.begin(); it2 != tmp.end(); ++it2)
{
std::cout << *it2 << " ";
}
std::cout << std::endl;
tmp.clear();
}
}
Developer ID: bosslegend33, Project: random_forests, Lines of code: 72, Source file: help_mat.cpp
Example 15: compute_tree_bagging
static int compute_tree_bagging(ETree *etree,int n,int d,double *x[],
int y[], int nmodels,int stumps, int minsize)
{
int i,b;
int *samples;
double **trx;
int *try;
if(nmodels<1){
fprintf(stderr,"compute_tree_bagging: nmodels must be greater than 0\n");
return 1;
}
if(stumps != 0 && stumps != 1){
fprintf(stderr,"compute_tree_bagging: parameter stumps must be 0 or 1\n");
return 1;
}
if(minsize < 0){
fprintf(stderr,"compute_tree_bagging: parameter minsize must be >= 0\n");
return 1;
}
etree->nclasses=iunique(y,n, &(etree->classes));
if(etree->nclasses<=0){
fprintf(stderr,"compute_tree_bagging: iunique error\n");
return 1;
}
if(etree->nclasses==1){
fprintf(stderr,"compute_tree_bagging: only 1 class recognized\n");
return 1;
}
if(etree->nclasses==2)
if(etree->classes[0] != -1 || etree->classes[1] != 1){
fprintf(stderr,"compute_tree_bagging: for binary classification classes must be -1,1\n");
return 1;
}
if(etree->nclasses>2)
for(i=0;i<etree->nclasses;i++)
if(etree->classes[i] != i+1){
fprintf(stderr,"compute_tree_bagging: for %d-class classification classes must be 1,...,%d\n",etree->nclasses,etree->nclasses);
return 1;
}
if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
fprintf(stderr,"compute_tree_bagging: out of memory\n");
return 1;
}
etree->nmodels=nmodels;
if(!(etree->weights=dvector(nmodels))){
fprintf(stderr,"compute_tree_bagging: out of memory\n");
return 1;
}
for(b=0;b<nmodels;b++)
etree->weights[b]=1.0 / (double) nmodels;
if(!(trx=(double **)calloc(n,sizeof(double*)))){
fprintf(stderr,"compute_tree_bagging: out of memory\n");
return 1;
}
if(!(try=ivector(n))){
fprintf(stderr,"compute_tree_bagging: out of memory\n");
return 1;
}
for(b=0;b<nmodels;b++){
if(sample(n, NULL, n, &samples, TRUE,b)!=0){
fprintf(stderr,"compute_tree_bagging: sample error\n");
return 1;
}
for(i =0;i<n;i++){
trx[i] = x[samples[i]];
try[i] = y[samples[i]];
}
if(compute_tree(&(etree->tree[b]),n,d,trx,try,stumps,minsize)!=0){
fprintf(stderr,"compute_tree_bagging: compute_tree error\n");
return 1;
}
free_ivector(samples);
}
free(trx);
free_ivector(try);
return 0;
}
static int compute_tree_aggregate(ETree *etree,int n,int d,double *x[],int y[],
int nmodels,int stumps, int minsize)
//......... code omitted here .........
Developer ID: Arafatk, Project: mlpy, Lines of code: 101, Source file: tree.c
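Each bagging round calls sample(n, NULL, n, &samples, TRUE, b) to draw n training indices with replacement, the NULL weights meaning a uniform draw. The mlpy sample() implementation is not reproduced on this page; a uniform bootstrap draw with the same effect (a sketch, not the original) is:

#include <random>
#include <vector>

// Draw n indices in [0, n) uniformly with replacement (one bootstrap resample).
std::vector<int> bootstrapIndices(int n, unsigned seed) {
    std::mt19937 gen(seed);
    std::uniform_int_distribution<int> pick(0, n - 1);
    std::vector<int> idx(n);
    for (int i = 0; i < n; ++i)
        idx[i] = pick(gen);
    return idx;
}

In the AdaBoost variant further down, the same helper is called with the prob array instead of NULL, i.e. a weighted draw, which std::discrete_distribution would cover in a sketch like this one.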
Example 16: PBRT_MLT_STARTED_RENDERING
void MetropolisRenderer::Render(const Scene *scene) {
PBRT_MLT_STARTED_RENDERING();
if (scene->lights.size() > 0) {
int x0, x1, y0, y1;
camera->film->GetPixelExtent(&x0, &x1, &y0, &y1);
float t0 = camera->shutterOpen, t1 = camera->shutterClose;
Distribution1D *lightDistribution = ComputeLightSamplingCDF(scene);
if (directLighting != NULL) {
PBRT_MLT_STARTED_DIRECTLIGHTING();
// Compute direct lighting before Metropolis light transport
if (nDirectPixelSamples > 0) {
LDSampler sampler(x0, x1, y0, y1, nDirectPixelSamples, t0, t1);
Sample *sample = new Sample(&sampler, directLighting, NULL, scene);
vector<Task *> directTasks;
int nDirectTasks = max(32 * NumSystemCores(),
(camera->film->xResolution * camera->film->yResolution) / (16*16));
nDirectTasks = RoundUpPow2(nDirectTasks);
ProgressReporter directProgress(nDirectTasks, "Direct Lighting");
for (int i = 0; i < nDirectTasks; ++i)
directTasks.push_back(new SamplerRendererTask(scene, this, camera, directProgress,
&sampler, sample, false, i, nDirectTasks));
std::reverse(directTasks.begin(), directTasks.end());
EnqueueTasks(directTasks);
WaitForAllTasks();
for (uint32_t i = 0; i < directTasks.size(); ++i)
delete directTasks[i];
delete sample;
directProgress.Done();
}
camera->film->WriteImage();
PBRT_MLT_FINISHED_DIRECTLIGHTING();
}
// Take initial set of samples to compute $b$
PBRT_MLT_STARTED_BOOTSTRAPPING(nBootstrap);
RNG rng(0);
MemoryArena arena;
vector<float> bootstrapI;
vector<PathVertex> cameraPath(maxDepth, PathVertex());
vector<PathVertex> lightPath(maxDepth, PathVertex());
float sumI = 0.f;
bootstrapI.reserve(nBootstrap);
MLTSample sample(maxDepth);
for (uint32_t i = 0; i < nBootstrap; ++i) {
// Generate random sample and path radiance for MLT bootstrapping
float x = Lerp(rng.RandomFloat(), x0, x1);
float y = Lerp(rng.RandomFloat(), y0, y1);
LargeStep(rng, &sample, maxDepth, x, y, t0, t1, bidirectional);
Spectrum L = PathL(sample, scene, arena, camera, lightDistribution,
&cameraPath[0], &lightPath[0], rng);
// Compute contribution for random sample for MLT bootstrapping
float I = ::I(L);
sumI += I;
bootstrapI.push_back(I);
arena.FreeAll();
}
float b = sumI / nBootstrap;
PBRT_MLT_FINISHED_BOOTSTRAPPING(b);
Info("MLT computed b = %f", b);
// Select initial sample from bootstrap samples
float contribOffset = rng.RandomFloat() * sumI;
rng.Seed(0);
sumI = 0.f;
MLTSample initialSample(maxDepth);
for (uint32_t i = 0; i < nBootstrap; ++i) {
float x = Lerp(rng.RandomFloat(), x0, x1);
float y = Lerp(rng.RandomFloat(), y0, y1);
LargeStep(rng, &initialSample, maxDepth, x, y, t0, t1,
bidirectional);
sumI += bootstrapI[i];
if (sumI > contribOffset)
break;
}
// Launch tasks to generate Metropolis samples
uint32_t nTasks = largeStepsPerPixel;
uint32_t largeStepRate = nPixelSamples / largeStepsPerPixel;
Info("MLT running %d tasks, large step rate %d", nTasks, largeStepRate);
ProgressReporter progress(nTasks * largeStepRate, "Metropolis");
vector<Task *> tasks;
Mutex *filmMutex = Mutex::Create();
Assert(IsPowerOf2(nTasks));
uint32_t scramble[2] = { rng.RandomUInt(), rng.RandomUInt() };
uint32_t pfreq = (x1-x0) * (y1-y0);
for (uint32_t i = 0; i < nTasks; ++i) {
float d[2];
Sample02(i, scramble, d);
tasks.push_back(new MLTTask(progress, pfreq, i,
d[0], d[1], x0, x1, y0, y1, t0, t1, b, initialSample,
scene, camera, this, filmMutex, lightDistribution));
}
EnqueueTasks(tasks);
WaitForAllTasks();
for (uint32_t i = 0; i < tasks.size(); ++i)
delete tasks[i];
progress.Done();
Mutex::Destroy(filmMutex);
delete lightDistribution;
//......... code omitted here .........
Developer ID: xtype0x, Project: rendering-project, Lines of code: 101, Source file: metropolis.cpp
Example 17: compute_tree_adaboost
static int compute_tree_adaboost(ETree *etree,int n,int d,double *x[],int y[],
int nmodels,int stumps, int minsize)
{
int i,b;
int *samples;
double **trx;
int *try;
double *prob;
double *prob_copy;
double sumalpha;
double eps;
int *pred;
double *margin;
double sumprob;
if(nmodels<1){
fprintf(stderr,"compute_tree_adaboost: nmodels must be greater than 0\n");
return 1;
}
if(stumps != 0 && stumps != 1){
fprintf(stderr,"compute_tree_bagging: parameter stumps must be 0 or 1\n");
return 1;
}
if(minsize < 0){
fprintf(stderr,"compute_tree_bagging: parameter minsize must be >= 0\n");
return 1;
}
etree->nclasses=iunique(y,n, &(etree->classes));
if(etree->nclasses<=0){
fprintf(stderr,"compute_tree_adaboost: iunique error\n");
return 1;
}
if(etree->nclasses==1){
fprintf(stderr,"compute_tree_adaboost: only 1 class recognized\n");
return 1;
}
if(etree->nclasses==2)
if(etree->classes[0] != -1 || etree->classes[1] != 1){
fprintf(stderr,"compute_tree_adaboost: for binary classification classes must be -1,1\n");
return 1;
}
if(etree->nclasses>2){
fprintf(stderr,"compute_tree_adaboost: multiclass classification not allowed\n");
return 1;
}
if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
fprintf(stderr,"compute_tree_adaboost: out of memory\n");
return 1;
}
if(!(etree->weights=dvector(nmodels))){
fprintf(stderr,"compute_tree_adaboost: out of memory\n");
return 1;
}
if(!(trx=(double **)calloc(n,sizeof(double*)))){
fprintf(stderr,"compute_tree_adaboost: out of memory\n");
return 1;
}
if(!(try=ivector(n))){
fprintf(stderr,"compute_tree_adaboost: out of memory\n");
return 1;
}
if(!(prob_copy=dvector(n))){
fprintf(stderr,"compute_tree_adaboost: out of memory\n");
return 1;
}
if(!(prob=dvector(n))){
fprintf(stderr,"compute_tree_adaboost: out of memory\n");
return 1;
}
if(!(pred=ivector(n))){
fprintf(stderr,"compute_tree_adaboost: out of memory\n");
return 1;
}
for(i =0;i<n;i++)
prob[i]=1.0/(double)n;
etree->nmodels=nmodels;
sumalpha=0.0;
for(b=0;b<nmodels;b++){
for(i =0;i<n;i++)
prob_copy[i]=prob[i];
if(sample(n, prob_copy, n, &samples, TRUE,b)!=0){
fprintf(stderr,"compute_tree_adaboost: sample error\n");
return 1;
}
//......... code omitted here .........
Developer ID: Arafatk, Project: mlpy, Lines of code: 101, Source file: tree.c
Example 18: WinMain
_Use_decl_annotations_
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE, LPSTR, int nCmdShow)
{
D3D12HelloConstBuffers sample(1280, 720, L"D3D12 Raymarcher");
return sample.Run(hInstance, nCmdShow);
}
Developer ID: WilliamChao, Project: D3D12Raymarcher, Lines of code: 6, Source file: Main.cpp
Example 19: compute_sift_keypoints
void compute_sift_keypoints(float *input, keypointslist& keypoints, int width, int height, siftPar &par)
{
flimage image;
/// Make zoom of image if necessary
float octSize = 1.0;
if (par.DoubleImSize){
//printf("... compute_sift_keypoints :: applying zoom\n");
// image.create(2*width, 2*height);
// apply_zoom(input,image.getPlane(),2.0,par.order,width,height);
// octSize *= 0.5;
printf("Double image size not allowed. Guoshen Yu\n");
exit(-1);
} else
{
image.create(width,height,input);
}
// printf("Using initial Dog value: %f\n", par.PeakThresh);
// printf("Double image size: %d\n", par.DoubleImSize);
// printf("Interpolation order: %d\n", par.order);
/// Apply initial smoothing to input image to raise its smoothing to par.InitSigma.
/// We assume image from camera has smoothing of sigma = 0.5, which becomes sigma = 1.0 if image has been doubled.
/// increase = sqrt(Init^2 - Current^2)
float curSigma;
if (par.DoubleImSize) curSigma = 1.0; else curSigma = 0.5;
if (par.InitSigma > curSigma ) {
if (DEBUG) printf("Convolving initial image to achieve std: %f \n", par.InitSigma);
float sigma = (float) sqrt((double)(par.InitSigma * par.InitSigma - curSigma * curSigma));
gaussian_convolution( image.getPlane(), image.getPlane(), image.nwidth(), image.nheight(), sigma);
}
/// Convolve by par.InitSigma at each step inside OctaveKeypoints by steps of
/// Subsample of factor 2 while reasonable image size
/// Keep reducing image by factors of 2 until one dimension is
/// smaller than minimum size at which a feature could be detected.
int minsize = 2 * par.BorderDist + 2;
int OctaveCounter = 0;
//printf("... compute_sift_keypoints :: maximum number of scales : %d\n", par.OctaveMax);
while (image.nwidth() > minsize && image.nheight() > minsize && OctaveCounter < par.OctaveMax) {
if (DEBUG) printf("Calling OctaveKeypoints \n");
OctaveKeypoints(image, octSize, keypoints,par);
// image is blurred inside OctaveKeypoints and therefore can be sampled
flimage aux( (int)((float) image.nwidth() / 2.0f) , (int)((float) image.nheight() / 2.0f));
if (DEBUG) printf("Sampling initial image \n");
sample(image.getPlane(), aux.getPlane(), 2.0f, image.nwidth(), image.nheight());
image = aux;
octSize *= 2.0;
OctaveCounter++;
}
/* printf("sift:: %d keypoints\n", keypoints.size());
printf("sift:: plus non correctly localized: %d \n", par.noncorrectlylocalized);*/
}
Developer ID: master0567, Project: MarkerLessAR, Lines of code: 84, Source file: demo_lib_sift.cpp
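Between octaves the image is shrunk by a factor of two with sample(); after the Gaussian blur applied inside OctaveKeypoints, that can be as simple as keeping every second pixel. A hedged sketch of that decimation (not the demo_lib_sift implementation, which accepts an arbitrary factor):

// Decimate a blurred grayscale image by 2 in each dimension (nearest pixel).
void downsampleBy2(const float *src, float *dst, int srcWidth, int srcHeight) {
    const int dstWidth = srcWidth / 2;
    const int dstHeight = srcHeight / 2;
    for (int y = 0; y < dstHeight; ++y)
        for (int x = 0; x < dstWidth; ++x)
            dst[y * dstWidth + x] = src[(2 * y) * srcWidth + (2 * x)];
}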
Example 20: gray
void Sighter::procSight(cv::Mat &frame, cv::Mat &lfeat, cv::Mat &rfeat) {
cv::cvtColor(frame, drawMat, CV_BGRA2BGR);
cv::flip(drawMat, drawMat, 1);
cv::cvtColor(drawMat, grayMat, CV_BGR2GRAY);
cv::Rect frect;
cv::Mat& draw = drawMat;
cv::Mat& gray = grayMat;
//Detect face
cv::vector<cv::Rect> faces;
faceDetector.detectMultiScale(gray, faces, 1.1, 20, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(gray.cols/4,gray.rows/4));
if(faces.empty()) {
return;
}
frect = faces[0];
cv::Mat face = gray(frect);
//Detect eye
cv::vector<cv::Rect> leyes,reyes;
cv::Size max_size = cv::Size(face.cols/2,face.rows/4);
cv::Size min_size = cv::Size(face.cols/10, 10);
cv::Mat lface = face(cv::Rect(0,face.rows/4,face.cols/2,face.rows/3));
eyeDetector.detectMultiScale(lface, leyes, 1.1, 20, CV_HAAR_DO_CANNY_PRUNING, min_size, max_size);
cv::Mat rface = face(cv::Rect(face.cols/2,face.rows/4,face.cols/2,face.rows/3));
eyeDetector.detectMultiScale(rface, reyes, 1.1, 20, CV_HAAR_DO_CANNY_PRUNING, min_size, max_size);
int szl = (int)leyes.size();
int szr = (int)reyes.size();
if(szl &l