This article collects typical usage examples of the Python function tests.run_test. If you have been wondering what run_test does, how to call it, or where to find real-world examples, the code samples curated here may help.
Below are 20 code examples of the run_test function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
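Before the individual examples, here is a minimal sketch of the calling convention that recurs throughout the h2o-3 samples on this page. It is inferred from the examples below rather than copied from any single project: each pyunit passes sys.argv together with a test function to tests.run_test, which is expected to parse the command-line arguments, connect to the H2O cluster, and then invoke the function. The file name pyunit_example.py and the function name my_pyunit are hypothetical placeholders.

# pyunit_example.py -- minimal sketch of the recurring pattern (assumed, not taken from any one project)
import sys
sys.path.insert(1, "../../../")  # make the shared h2o / tests helper modules importable
import h2o, tests

def my_pyunit():
    # Hypothetical test body: import a small dataset and make a trivial check.
    iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))
    assert iris.nrow > 0, "expected a non-empty frame"

if __name__ == "__main__":
    # tests.run_test is expected to parse sys.argv (e.g. the cluster's ip/port),
    # set up the H2O connection, and then call the supplied test function.
    tests.run_test(sys.argv, my_pyunit)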
Example 1: main

def main(do_tests=False, do_bench=False):
    # If neither tests nor benchmarks are enabled, print usage and exit.
    if not do_tests and not do_bench:
        print("test: neither tests nor benchmarks are enabled")
        parser.print_help()
        return

    if do_tests:
        print("[!] Running test suite!")
        tests.run_test()

    if do_bench:
        print("[!] Running benchmarks! (may take some time)")
        tests.run_bench()

Author: cyphar | Project: redone | Lines: 14 | Source: test.py
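The excerpt above refers to a parser object that is defined elsewhere in the project's test.py. As a rough illustration only, such a wrapper might be wired up with argparse roughly as follows; the option names and help text are assumptions, not the actual redone source.

# Hypothetical wrapper around main(); the real test.py in cyphar/redone may differ.
import argparse
import tests

parser = argparse.ArgumentParser(description="Run the test suite and/or benchmarks.")
parser.add_argument("--tests", action="store_true", help="run the test suite")
parser.add_argument("--bench", action="store_true", help="run the benchmarks")

def main(do_tests=False, do_bench=False):
    if not do_tests and not do_bench:
        print("test: neither tests nor benchmarks are enabled")
        parser.print_help()
        return
    if do_tests:
        tests.run_test()
    if do_bench:
        tests.run_bench()

if __name__ == "__main__":
    args = parser.parse_args()
    main(do_tests=args.tests, do_bench=args.bench)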
Example 2: set

                          epochs=1,
                          reproducible=True,  # slow, turn off for real problems
                          seed=1234)

    # convert train_supervised with autoencoder to lower-dimensional space
    train_supervised_features = ae_model.deepfeatures(train_supervised[0:resp]._frame(), 0)
    assert train_supervised_features.ncol == nfeatures, "Dimensionality of reconstruction is wrong!"

    # Train DRF on extracted feature space
    drf_model = h2o.random_forest(x=train_supervised_features[0:20],
                                  y=train_supervised[resp],
                                  ntrees=10,
                                  min_rows=10,
                                  seed=1234)

    # Test the DRF model on the test set (processed through deep features)
    test_features = ae_model.deepfeatures(test_hex[0:resp]._frame(), 0)
    test_features = test_features.cbind(test_hex[resp])._frame()

    # Confusion matrix and assertion: expect ~8.2% error (+/- 0.001)
    cm = drf_model.confusion_matrix(test_features)
    cm.show()
    assert abs(cm.cell_values[10][10] - 0.082) < 0.001, "Error. Expected 0.082, but got {0}".format(cm.cell_values[10][10])

if __name__ == '__main__':
    tests.run_test(sys.argv, deeplearning_autoencoder)

Author: tomasgreif | Project: h2o-3 | Lines: 29 | Source: pyunit_autoencoderDeepLearning_large.py
Example 3: list

                                                                  mul_metric_diff)

    # Clustering metric json
    df = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))
    clus_mod = h2o.kmeans(x=df[0:4], k=3, standardize=False)
    clus_met = clus_mod.model_performance()
    clus_metric_json_keys_have = clus_met._metric_json.keys()
    clus_metric_json_keys_desired = [u'tot_withinss',
                                     u'model_category',
                                     u'description',
                                     u'frame',
                                     u'model_checksum',
                                     u'MSE',
                                     u'__meta',
                                     u'scoring_time',
                                     u'betweenss',
                                     u'predictions',
                                     u'totss',
                                     u'model',
                                     u'duration_in_ms',
                                     u'frame_checksum',
                                     u'centroid_stats']
    clus_metric_diff = list(set(clus_metric_json_keys_have) - set(clus_metric_json_keys_desired))
    assert not clus_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) clustering " \
                                 "metric json. The difference is {2}".format(clus_metric_json_keys_have,
                                                                             clus_metric_json_keys_desired,
                                                                             clus_metric_diff)

if __name__ == "__main__":
    tests.run_test(sys.argv, metric_json_check)

Author: rakeshsukumar | Project: h2o-3 | Lines: 30 | Source: pyunit_metric_json_check.py
Example 4: abs

    assert abs(1515.91815848623 - prostate_glm_h2o.residual_deviance()) < 0.1

    print "Checking binomial model without offset..."
    prostate_glm_h2o = h2o.glm(
        x=prostate_hex[["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"]],
        y=prostate_hex["CAPSULE"],
        training_frame=prostate_hex,
        family="poisson",
        standardize=False,
    )
    print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
    print "r residual: {0}".format(216.339989007507)
    assert abs(216.339989007507 - prostate_glm_h2o.residual_deviance()) < 0.1

    print "Checking binomial model with offset..."
    prostate_glm_h2o = h2o.glm(
        x=prostate_hex[["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"]],
        y=prostate_hex["CAPSULE"],
        training_frame=prostate_hex,
        family="poisson",
        offset_column="AGE",
        standardize=False,
    )
    print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
    print "r residual: {0}".format(2761.76218461138)
    assert abs(2761.76218461138 - prostate_glm_h2o.residual_deviance()) < 0.1

if __name__ == "__main__":
    tests.run_test(sys.argv, offset_1897)

Author: kyoren | Project: https-github.com-h2oai-h2o-3 | Lines: 30 | Source: pyunit_NOPASS_hex_1897_glm_offset.py
Example 5: anyfactor

import sys
sys.path.insert(1, "../../../")
import h2o, tests

def anyfactor():
    iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))

    # frame (positive example)
    assert iris.anyfactor(), "Expected true, but got false. Column 5 is a factor."
    # frame (negative example)
    assert not iris[:,:4].anyfactor(), "Expected false, but got true. Columns 1-4 are numeric."
    # vec (positive example)
    assert iris[4].anyfactor(), "Expected true, but got false. Column 5 is a factor."
    # vec (negative example)
    assert not iris[0].anyfactor(), "Expected false, but got true. Column 1 is numeric."

if __name__ == "__main__":
    tests.run_test(sys.argv, anyfactor)

Author: Jacksonlark | Project: h2o-3 | Lines: 24 | Source: pyunit_anyfactor.py
Example 6: weights_and_distributions

import sys
sys.path.insert(1, "../../../")
import h2o, tests

def weights_and_distributions(ip, port):
    htable = h2o.upload_file(h2o.locate("smalldata/gbm_test/moppe.csv"))
    htable["premiekl"] = htable["premiekl"].asfactor()
    htable["moptva"] = htable["moptva"].asfactor()
    htable["zon"] = htable["zon"]

    # gamma
    dl = h2o.deeplearning(x=htable[0:3], y=htable["medskad"], training_frame=htable, distribution="gamma", weights_column="antskad")
    predictions = dl.predict(htable)

    # gaussian
    dl = h2o.deeplearning(x=htable[0:3], y=htable["medskad"], training_frame=htable, distribution="gaussian", weights_column="antskad")
    predictions = dl.predict(htable)

    # poisson
    dl = h2o.deeplearning(x=htable[0:3], y=htable["medskad"], training_frame=htable, distribution="poisson", weights_column="antskad")
    predictions = dl.predict(htable)

    # tweedie
    dl = h2o.deeplearning(x=htable[0:3], y=htable["medskad"], training_frame=htable, distribution="tweedie", weights_column="antskad")
    predictions = dl.predict(htable)

if __name__ == "__main__":
    tests.run_test(sys.argv, weights_and_distributions)

Author: rakeshsukumar | Project: h2o-3 | Lines: 29 | Source: pyunit_weights_and_distributionsDeeplearning.py
Example 7: get_modelKmeans

from sklearn.preprocessing import Imputer

def get_modelKmeans():
    # Connect to a pre-existing cluster (localhost:54321)
    # Log.info("Importing benign.csv data...\n")
    benign_h2o = h2o.import_file(path=h2o.locate("smalldata/logreg/benign.csv"))
    # benign_h2o.summary()
    benign_sci = np.genfromtxt(h2o.locate("smalldata/logreg/benign.csv"), delimiter=",")

    # Impute missing values with column mean
    imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
    benign_sci = imp.fit_transform(benign_sci)

    for i in range(2, 7):
        # Log.info("H2O K-Means")
        km_h2o = h2o.kmeans(x=benign_h2o, k=i)
        km_h2o.show()
        model = h2o.get_model(km_h2o._id)
        model.show()

        km_sci = KMeans(n_clusters=i, init='k-means++', n_init=1)
        km_sci.fit(benign_sci)
        print "scikit centers"
        print km_sci.cluster_centers_

if __name__ == "__main__":
    tests.run_test(sys.argv, get_modelKmeans)

Author: Jacksonlark | Project: h2o-3 | Lines: 29 | Source: pyunit_get_modelKmeans.py
Example 8: rep_len_check

import sys
sys.path.insert(1, "../../")
import h2o, tests

def rep_len_check():
    # Connect to a pre-existing cluster
    iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))

    # data is a single column (vec)
    vec = iris[0].rep_len(length_out=301)
    assert vec.nrow == 301, "Expected an H2OVec with 301 rows, but got {0} rows".format(vec.nrow)
    for r in range(len(vec)):
        assert vec[r,:] == vec[r % 150,:], "Expected {0}, but got {1}".format(vec[r % 150,:], vec[r,:])

    # data is a frame
    fr = iris.rep_len(length_out=7)
    assert fr.nrow == 150 and fr.ncol == 7, "Expected an H2OFrame with 150 rows and 7 columns, but got {0} rows and {1} cols".format(fr.nrow, fr.ncol)

if __name__ == "__main__":
    tests.run_test(sys.argv, rep_len_check)

Author: Jacksonlark | Project: h2o-3 | Lines: 21 | Source: pyunit_rep_len.py
Example 9: assert

    df_hex.summary()

    assert not df_hex['h1'].isfactor()
    assert df_hex['h2'].isfactor()
    assert not df_hex['h3'].isfactor()

    df_hex['h1'] = df_hex['h1'].asfactor()
    df_hex['h2'] = df_hex['h2'].asfactor()
    df_hex['h3'] = df_hex['h3'].asfactor()
    df_hex.show()
    df_hex.summary()

    assert df_hex['h1'].isfactor()
    assert df_hex['h2'].isfactor()
    assert df_hex['h3'].isfactor()

    df_hex['h1'] = df_hex['h1'].asnumeric()
    df_hex['h2'] = df_hex['h2'].asnumeric()
    df_hex['h3'] = df_hex['h3'].asnumeric()
    df_hex.show()
    df_hex.summary()

    assert not df_hex['h1'].isfactor()
    assert not df_hex['h2'].isfactor()
    assert not df_hex['h3'].isfactor()

if __name__ == "__main__":
    tests.run_test(sys.argv, continuous_or_categorical)

Author: rakeshsukumar | Project: h2o-3 | Lines: 30 | Source: pyunit_hexdev_29_categorical_continuous.py
Example 10:

    res = iris[0] == 4.7
    res_rows = res.nrow
    assert res_rows == rows, "dimension mismatch"
    new_rows = iris[res].nrow
    assert new_rows == 2, "wrong number of rows returned"

    res = 3.5 == iris[1]
    res_rows = res.nrow
    assert res_rows == rows, "dimension mismatch"
    new_rows = iris[res].nrow
    assert new_rows == 6, "wrong number of rows returned"

    # frame/frame
    res = iris == iris
    res_rows, res_cols = res.dim
    assert res_rows == rows and res_cols == cols, "dimension mismatch"

    res = iris[0:2] == iris[1:3]
    res_rows, res_cols = res.dim
    assert res_rows == rows and res_cols == 2, "dimension mismatch"

    # try:
    #     res = iris == iris[0:3]
    #     res.show()
    #     assert False, "expected error. frames are different dimensions."
    # except EnvironmentError:
    #     pass

if __name__ == "__main__":
    tests.run_test(sys.argv, binop_eq)

Author: rakeshsukumar | Project: h2o-3 | Lines: 30 | Source: pyunit_binop2_eq.py
Example 11: gbm_mean_residual_deviance

import sys
sys.path.insert(1, "../../../")
import h2o, tests

def gbm_mean_residual_deviance():
    cars = h2o.import_file(path=tests.locate("smalldata/junit/cars_20mpg.csv"))
    s = cars[0].runif()
    train = cars[s > 0.2]
    valid = cars[s <= 0.2]

    predictors = ["displacement", "power", "weight", "acceleration", "year"]
    response_col = "economy"
    gbm = h2o.gbm(x=train[predictors],
                  y=train[response_col],
                  validation_x=valid[predictors],
                  validation_y=valid[response_col],
                  nfolds=3)

    gbm_mrd = gbm.mean_residual_deviance(train=True, valid=True, xval=True)
    assert isinstance(gbm_mrd['train'], float), "Expected training mean residual deviance to be a float, but got " \
                                                "{0}".format(type(gbm_mrd['train']))
    assert isinstance(gbm_mrd['valid'], float), "Expected validation mean residual deviance to be a float, but got " \
                                                "{0}".format(type(gbm_mrd['valid']))
    assert isinstance(gbm_mrd['xval'], float), "Expected cross-validation mean residual deviance to be a float, but got " \
                                               "{0}".format(type(gbm_mrd['xval']))

if __name__ == '__main__':
    tests.run_test(sys.argv, gbm_mean_residual_deviance)

Author: kyoren | Project: https-github.com-h2oai-h2o-3 | Lines: 27 | Source: pyunit_mean_residual_devianceGBM.py
Example 12: check_same

    h2o_zero_weights.set_names(["weights"])
    h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
    h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
    print "\n\nChecking that using some zero weights is equivalent to removing those observations:"
    check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)

    # doubled weights same as doubled observations
    doubled_weights = [[1] if random.randint(0, 1) else [2] for r in range(406)]
    h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
    h2o_doubled_weights.set_names(["weights"])
    h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)

    doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
    colnames = doubled_data.pop(0)
    for idx, w in enumerate(doubled_weights):
        if w[0] == 2:
            doubled_data.append(doubled_data[idx])
    h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
    h2o_data_doubled.set_names(colnames)

    h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
    h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
    h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
    h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()

    print "\n\nChecking that doubling some weights is equivalent to doubling those observations:"
    check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)

if __name__ == "__main__":
    tests.run_test(sys.argv, weights_var_imp)

Author: junwucs | Project: h2o-3 | Lines: 30 | Source: pyunit_weights_var_impGBM.py
Example 13: Unif

    # Training set has two predictor columns
    # X1: 10 categorical levels, 100 observations per level; X2: Unif(0,1) noise
    # Ratio of y = 1 per level: cat01 = 1.0 (strong predictor), cat02 to cat10 = 0.5 (weak predictors)

    # Log.info("Importing swpreds_1000x3.csv data...\n")
    swpreds = h2o.import_file(path=tests.locate("smalldata/gbm_test/swpreds_1000x3.csv"))
    swpreds["y"] = swpreds["y"].asfactor()
    # Log.info("Summary of swpreds_1000x3.csv from H2O:\n")
    # swpreds.summary()

    # Train H2O DRF without the noise column
    # Log.info("Distributed Random Forest with only Predictor Column")
    model1 = h2o.random_forest(x=swpreds[["X1"]], y=swpreds["y"], ntrees=50, max_depth=20, nbins=500)
    model1.show()
    perf1 = model1.model_performance(swpreds)
    print(perf1.auc())

    # Train H2O DRF model including the noise column
    # Log.info("Distributed Random Forest including Noise Column")
    model2 = h2o.random_forest(x=swpreds[["X1","X2"]], y=swpreds["y"], ntrees=50, max_depth=20, nbins=500)
    model2.show()
    perf2 = model2.model_performance(swpreds)
    print(perf2.auc())

if __name__ == "__main__":
    tests.run_test(sys.argv, swpredsRF)

Author: kyoren | Project: https-github.com-h2oai-h2o-3 | Lines: 30 | Source: pyunit_swpredsRF.py
Example 14: benignKmeans

from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer

def benignKmeans():
    # Connect to a pre-existing cluster (localhost:54321)
    # Log.info("Importing benign.csv data...\n")
    benign_h2o = h2o.import_file(path=h2o.locate("smalldata/logreg/benign.csv"))
    # benign_h2o.summary()
    benign_sci = np.genfromtxt(h2o.locate("smalldata/logreg/benign.csv"), delimiter=",")

    # Impute missing values with column mean
    imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
    benign_sci = imp.fit_transform(benign_sci)

    # Log.info(paste("H2O K-Means with ", i, " clusters:\n", sep = ""))
    for i in range(1, 7):
        benign_h2o_km = h2o.kmeans(x=benign_h2o, k=i)
        print "H2O centers"
        print benign_h2o_km.centers()

        benign_sci_km = KMeans(n_clusters=i, init='k-means++', n_init=1)
        benign_sci_km.fit(benign_sci)
        print "scikit centers"
        print benign_sci_km.cluster_centers_

if __name__ == "__main__":
    tests.run_test(sys.argv, benignKmeans)

Author: Jacksonlark | Project: h2o-3 | Lines: 30 | Source: pyunit_benignKmeans.py
Example 15: pyunit_remove_vecs

import sys
sys.path.insert(1, "../../")
import h2o, tests
import random

def pyunit_remove_vecs():
    # TODO PUBDEV-1789
    pros = h2o.import_file(h2o.locate("smalldata/prostate/prostate.csv"))
    rows, cols = pros.dim

    remove = random.randint(1, 5)
    p1 = pros.remove_vecs(cols=random.sample(range(cols), remove))
    new_rows, new_cols = p1.dim
    assert new_rows == rows and new_cols == cols - remove, "Expected {0} rows and {1} columns, but got {2} rows and {3} " \
                                                           "columns.".format(rows, cols, new_rows, new_cols)

    remove = random.randint(1, 5)
    p1 = pros.remove_vecs(cols=random.sample(pros.names, remove))
    new_rows, new_cols = p1.dim
    assert new_rows == rows and new_cols == cols - remove, "Expected {0} rows and {1} columns, but got {2} rows and {3} " \
                                                           "columns.".format(rows, cols, new_rows, new_cols)

if __name__ == "__main__":
    tests.run_test(sys.argv, pyunit_remove_vecs)

Author: Jacksonlark | Project: h2o-3 | Lines: 24 | Source: pyunit_NOPASS_remove_vecs.py
Example 16: vec_slicing

import sys
sys.path.insert(1, "../../../")
import h2o, tests

def vec_slicing():
    iris = h2o.import_file(path=tests.locate("smalldata/iris/iris_wheader.csv"))
    iris.show()

    # H2OVec[int]
    res = 2 - iris
    res2 = res[0]
    assert abs(res2[3,0] - -2.6) < 1e-10 and abs(res2[17,0] - -3.1) < 1e-10 and abs(res2[24,0] - -2.8) < 1e-10, "incorrect values"

    # H2OVec[slice]
    res = iris[12:25, 1]
    assert abs(res[0,0] - 3.0) < 1e-10 and abs(res[1,0] - 3.0) < 1e-10 and abs(res[5,0] - 3.5) < 1e-10, "incorrect values"

if __name__ == "__main__":
    tests.run_test(sys.argv, vec_slicing)

Author: kyoren | Project: https-github.com-h2oai-h2o-3 | Lines: 24 | Source: pyunit_vec_slicing.py
Example 17: group_by

import pandas as pd
import numpy as np

def group_by():
    # Connect to a pre-existing cluster
    h2o_iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
    pd_iris = pd.read_csv(h2o.locate("smalldata/iris/iris_wheader.csv"))

    na_handling = ["ignore", "rm", "all"]
    col_names = h2o_iris.col_names[0:4]

    print "Running smoke test"

    # smoke test
    for na in na_handling:
        grouped = h2o_iris.group_by("class")
        grouped \
            .count(na=na) \
            .min(na=na) \
            .max(na=na) \
            .mean(na=na) \
            .var(na=na) \
            .sd(na=na) \
            .ss(na=na) \
            .sum(na=na)
        print grouped.get_frame()

if __name__ == "__main__":
    tests.run_test(sys.argv, group_by)

Author: junwucs | Project: h2o-3 | Lines: 30 | Source: pyunit_groupby.py
Example 18: wide_dataset_large

import numpy as np

def wide_dataset_large():
    print("Reading in Arcene training data for binomial modeling.")
    trainDataResponse = np.genfromtxt(tests.locate("smalldata/arcene/arcene_train_labels.labels"), delimiter=' ')
    trainDataResponse = np.where(trainDataResponse == -1, 0, 1)
    trainDataFeatures = np.genfromtxt(tests.locate("smalldata/arcene/arcene_train.data"), delimiter=' ')
    trainData = h2o.H2OFrame(np.column_stack((trainDataResponse, trainDataFeatures)).tolist())

    print("Run model on 3250 columns of Arcene with strong rules off.")
    model = h2o.glm(x=trainData[1:3250], y=trainData[0].asfactor(), family="binomial", lambda_search=False, alpha=[1])

    print("Test model on validation set.")
    validDataResponse = np.genfromtxt(tests.locate("smalldata/arcene/arcene_valid_labels.labels"), delimiter=' ')
    validDataResponse = np.where(validDataResponse == -1, 0, 1)
    validDataFeatures = np.genfromtxt(tests.locate("smalldata/arcene/arcene_valid.data"), delimiter=' ')
    validData = h2o.H2OFrame(np.column_stack((validDataResponse, validDataFeatures)).tolist())
    prediction = model.predict(validData)

    print("Check performance of predictions.")
    performance = model.model_performance(validData)

    print("Check that prediction AUC is better than guessing (0.5).")
    assert performance.auc() > 0.5, "predictions should be better than pure chance"

if __name__ == "__main__":
    tests.run_test(sys.argv, wide_dataset_large)

Author: kyoren | Project: https-github.com-h2oai-h2o-3 | Lines: 30 | Source: pyunit_wide_dataset_largeGLM.py
Example 19:

    # Ratio of y = 1 per level: cat01 = 1.0 (strong predictor), cat02 to cat10 = 0.5 (weak predictors)

    # Log.info("Importing swpreds_1000x3.csv data...\n")
    swpreds = h2o.import_file(path=tests.locate("smalldata/gbm_test/swpreds_1000x3.csv"))
    swpreds["y"] = swpreds["y"].asfactor()
    # Log.info("Summary of swpreds_1000x3.csv from H2O:\n")
    # swpreds.summary()

    # Train H2O GBM without the noise column
    # Log.info("H2O GBM with parameters:\nntrees = 50, max_depth = 20, nbins = 500\n")
    h2o_gbm_model1 = h2o.gbm(x=swpreds[["X1"]], y=swpreds["y"], distribution="bernoulli", ntrees=50, max_depth=20,
                             nbins=500)
    h2o_gbm_model1.show()
    h2o_gbm_perf1 = h2o_gbm_model1.model_performance(swpreds)
    h2o_auc1 = h2o_gbm_perf1.auc()

    # Train H2O GBM model including the noise column
    # Log.info("H2O GBM with parameters:\nntrees = 50, max_depth = 20, nbins = 500\n")
    h2o_gbm_model2 = h2o.gbm(x=swpreds[["X1","X2"]], y=swpreds["y"], distribution="bernoulli", ntrees=50, max_depth=20,
                             nbins=500)
    h2o_gbm_model2.show()
    h2o_gbm_perf2 = h2o_gbm_model2.model_performance(swpreds)
    h2o_auc2 = h2o_gbm_perf2.auc()

if __name__ == "__main__":
    tests.run_test(sys.argv, swpredsGBM)

Author: kyoren | Project: https-github.com-h2oai-h2o-3 | Lines: 30 | Source: pyunit_swpredsGBM.py
Example 20: get_modelGBM

import sys
sys.path.insert(1, "../../../")
import h2o, tests

def get_modelGBM():
    prostate = h2o.import_file(path=tests.locate("smalldata/logreg/prostate.csv"))
    prostate.describe()
    prostate[1] = prostate[1].asfactor()

    prostate_gbm = h2o.gbm(y=prostate[1], x=prostate[2:9], distribution="bernoulli")
    prostate_gbm.show()
    prostate_gbm.predict(prostate)

    model = h2o.get_model(prostate_gbm._id)
    model.show()

if __name__ == "__main__":
    tests.run_test(sys.argv, get_modelGBM)

Author: kyoren | Project: https-github.com-h2oai-h2o-3 | Lines: 20 | Source: pyunit_get_modelGBM.py
Note: The tests.run_test examples in this article were compiled by 纯净天空 from source code hosted on GitHub, MSDocs, and similar code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright in the code remains with the original authors. Please refer to each project's License before redistributing or reusing the code, and do not republish without permission.