This article collects typical usage examples of the mantid.api.AnalysisDataService class in Python. If you are wondering what the AnalysisDataService class is for, how to use it, or what real code that uses it looks like, the hand-picked examples below should help.
The following presents 20 code examples of the AnalysisDataService class, sorted by popularity by default.
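Before the examples, here is a minimal, self-contained sketch of the AnalysisDataService calls that recur throughout this page (addOrReplace, doesExist, retrieve, remove). The workspace name 'example_ws' and the small data arrays are placeholders chosen only for illustration; they do not come from any of the examples below.

from mantid.api import AnalysisDataService, WorkspaceFactory

# Build a small 1-spectrum workspace in memory; nothing is registered on the ADS yet.
ws = WorkspaceFactory.create("Workspace2D", NVectors=1, XLength=3, YLength=3)
ws.dataX(0)[:] = [1.0, 2.0, 3.0]
ws.dataY(0)[:] = [2.0, 3.0, 4.0]

# Register the workspace under a chosen name, replacing any existing entry of that name.
AnalysisDataService.addOrReplace('example_ws', ws)

# doesExist checks for a name without raising; retrieve raises a RuntimeError for unknown names.
if AnalysisDataService.doesExist('example_ws'):
    same_ws = AnalysisDataService.retrieve('example_ws')
    print(same_ws.name(), same_ws.getNumberHistograms())

# Remove the entry from the ADS once it is no longer needed.
AnalysisDataService.remove('example_ws')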
Example 1: test_that_can_load_isis_nexus_file_with_event_data_and_multi_period
def test_that_can_load_isis_nexus_file_with_event_data_and_multi_period(self):
    # Arrange
    state = SANSLoadTest._get_simple_state(sample_scatter="LARMOR00013065.nxs",
                                           calibration="80tubeCalibration_18-04-2016_r9330-9335.nxs")

    # Act
    output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
                              "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
    load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
                              output_workspace_names=output_workspace_names)

    # Assert
    expected_number_of_workspaces = [4, 0, 0, 0, 0, 0]
    expected_number_on_ads = 1
    workspace_type = [EventWorkspace, None, None, None, None, None]
    self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)

    # Check that calibration is added
    self.assertTrue(SANSLoadTest._has_calibration_been_applied(load_alg))

    # Confirm that the ADS workspace contains the calibration file
    try:
        AnalysisDataService.retrieve("80tubeCalibration_18-04-2016_r9330-9335")
        on_ads = True
    except RuntimeError:
        on_ads = False
    self.assertTrue(on_ads)

    # Cleanup
    remove_all_workspaces_from_ads()
Developer: samueljackson92, Project: mantid, Lines: 30, Source: SANSLoadTest.py
Example 2: get_weighted_peak_centres
def get_weighted_peak_centres(self):
    """ Get the peak centers found in the peak workspace.
    Guarantees: the peak centers and their weights (detector counts) are exported
    :return: 2-tuple: list of 3-tuples (Qx, Qy, Qz),
                      list of doubles (Det_Counts)
    """
    # get the PeaksWorkspace
    if AnalysisDataService.doesExist(self._myPeakWorkspaceName) is False:
        raise RuntimeError('PeaksWorkspace %s does not exist.' % self._myPeakWorkspaceName)

    peak_ws = AnalysisDataService.retrieve(self._myPeakWorkspaceName)

    # get peak center, peak intensity, etc.
    peak_center_list = list()
    peak_intensity_list = list()
    num_peaks = peak_ws.getNumberPeaks()
    for i_peak in range(num_peaks):
        peak_i = peak_ws.getPeak(i_peak)
        center_i = peak_i.getQSampleFrame()
        intensity_i = peak_i.getIntensity()
        peak_center_list.append((center_i.X(), center_i.Y(), center_i.Z()))
        peak_intensity_list.append(intensity_i)
    # END-FOR

    return peak_center_list, peak_intensity_list
Developer: peterfpeterson, Project: mantid, Lines: 25, Source: peakprocesshelper.py
Example 3: retrieve_hkl_from_spice_table
def retrieve_hkl_from_spice_table(self):
    """ Get averaged HKL from the SPICE table.
    HKL is averaged over the SPICE table rows, assuming the values recorded in SPICE are right.
    :return:
    """
    # get the SPICE table
    spice_table_name = get_spice_table_name(self._myExpNumber, self._myScanNumber)
    assert AnalysisDataService.doesExist(spice_table_name), 'Spice table for exp %d scan %d cannot be found.' \
                                                            '' % (self._myExpNumber, self._myScanNumber)

    spice_table_ws = AnalysisDataService.retrieve(spice_table_name)

    # get the HKL column indexes
    h_col_index = spice_table_ws.getColumnNames().index('h')
    k_col_index = spice_table_ws.getColumnNames().index('k')
    l_col_index = spice_table_ws.getColumnNames().index('l')

    # scan each Pt.
    hkl = numpy.array([0., 0., 0.])
    num_rows = spice_table_ws.rowCount()
    for row_index in range(num_rows):
        mi_h = spice_table_ws.cell(row_index, h_col_index)
        mi_k = spice_table_ws.cell(row_index, k_col_index)
        mi_l = spice_table_ws.cell(row_index, l_col_index)
        hkl += numpy.array([mi_h, mi_k, mi_l])
    # END-FOR

    self._spiceHKL = hkl / num_rows

    return
Developer: peterfpeterson, Project: mantid, Lines: 31, Source: peakprocesshelper.py
Example 4: _createTwoCurves
def _createTwoCurves(self, datawsname):
    """ Create data workspace
    """
    E = np.arange(-50, 50, 1.0)
    # curve 1
    I = 1000 * np.exp(-E**2/10**2)
    err = I ** .5
    # curve 2
    I2 = 1000 * (1 + np.sin(E/5*np.pi))
    err2 = I ** .5
    # workspace
    ws = WorkspaceFactory.create(
        "Workspace2D", NVectors=2,
        XLength=E.size, YLength=I.size
    )
    # curve1
    ws.dataX(0)[:] = E
    ws.dataY(0)[:] = I
    ws.dataE(0)[:] = err
    # curve2
    ws.dataX(1)[:] = E
    ws.dataY(1)[:] = I2
    ws.dataE(1)[:] = err2
    # Add to data service
    AnalysisDataService.addOrReplace(datawsname, ws)
    return E, I, err, I2, err2
Developer: liyulun, Project: mantid, Lines: 26, Source: SavePlot1DAsJsonTest.py
Example 5: test_LoadHKLFile
def test_LoadHKLFile(self):
    """ Test to load a .hkl file
    """
    # 1. Create a test file
    hklfilename = "test.hkl"
    self._createHKLFile(hklfilename)

    # 2.
    alg_test = run_algorithm("LoadFullprofFile", Filename=hklfilename,
                             OutputWorkspace="Foo", PeakParameterWorkspace="PeakParameterTable")

    self.assertTrue(alg_test.isExecuted())

    # 3. Verify some values
    tablews = AnalysisDataService.retrieve("PeakParameterTable")
    self.assertEqual(4, tablews.rowCount())

    # alpha of (11 5 1)/Row 0
    self.assertEqual(0.34252, tablews.cell(0, 3))

    # 4. Delete the test hkl file
    os.remove(hklfilename)
    AnalysisDataService.remove("PeakParameterTable")
    AnalysisDataService.remove("Foo")

    return
Developer: DanNixon, Project: mantid, Lines: 26, Source: LoadFullprofFileTest.py
Example 6: test_LoadPRFFile
def test_LoadPRFFile(self):
    """ Test to load a .prf file
    """
    # 1. Create test .prf file
    prffilename = "test.prf"
    self._createPrfFile(prffilename)

    # 2. Execute the algorithm
    alg_test = run_algorithm("LoadFullprofFile",
                             Filename=prffilename,
                             OutputWorkspace="Data",
                             PeakParameterWorkspace="Info")

    self.assertTrue(alg_test.isExecuted())

    # 3. Check data
    dataws = AnalysisDataService.retrieve("Data")
    self.assertEqual(dataws.getNumberHistograms(), 4)
    self.assertEqual(len(dataws.readX(0)), 36)

    # value
    self.assertEqual(dataws.readX(0)[13], 5026.3223)
    self.assertEqual(dataws.readY(1)[30], 0.3819)

    # 4. Clean
    os.remove(prffilename)
    AnalysisDataService.remove("Data")
    AnalysisDataService.remove("Info")

    return
Developer: DanNixon, Project: mantid, Lines: 31, Source: LoadFullprofFileTest.py
Example 7: edit_matrix_workspace
def edit_matrix_workspace(sq_name, scale_factor, shift, edited_sq_name=None):
    """
    Edit the matrix workspace of S(Q) by scaling and shifting
    :param sq_name: name of the S(Q) workspace
    :param scale_factor:
    :param shift:
    :param edited_sq_name: workspace name for the edited S(Q)
    :return:
    """
    # get the workspace
    if AnalysisDataService.doesExist(sq_name) is False:
        raise RuntimeError('S(Q) workspace {0} cannot be found in ADS.'.format(sq_name))

    if edited_sq_name is not None:
        simpleapi.CloneWorkspace(InputWorkspace=sq_name, OutputWorkspace=edited_sq_name)
        sq_ws = AnalysisDataService.retrieve(edited_sq_name)
    else:
        sq_ws = AnalysisDataService.retrieve(sq_name)

    # scale and shift the Y values
    sq_ws = sq_ws * scale_factor
    sq_ws = sq_ws + shift
    if sq_ws.name() != edited_sq_name:
        simpleapi.DeleteWorkspace(Workspace=edited_sq_name)
        simpleapi.RenameWorkspace(InputWorkspace=sq_ws, OutputWorkspace=edited_sq_name)

    assert sq_ws is not None, 'S(Q) workspace cannot be None.'
    print('[DB...BAT] The edited S(Q) workspace is {0}'.format(sq_ws))
Developer: neutrons, Project: FastGR, Lines: 28, Source: addiedriver.py
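For orientation, a hypothetical call of this helper might look like the following; the workspace names and the scale/shift values are made-up placeholders, not taken from the FastGR project.

# Assumes an S(Q) workspace named 'SofQ_sample' has already been registered on the ADS (made-up name).
edit_matrix_workspace(sq_name='SofQ_sample', scale_factor=1.2, shift=0.5,
                      edited_sq_name='SofQ_sample_edited')
scaled_sq = AnalysisDataService.retrieve('SofQ_sample_edited')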
Example 8: cleanup
def cleanup(self):
    if AnalysisDataService.doesExist(self._input_wksp):
        DeleteWorkspace(self._input_wksp)
    if AnalysisDataService.doesExist(self._output_wksp):
        DeleteWorkspace(self._output_wksp)
    if AnalysisDataService.doesExist(self._correction_wksp):
        DeleteWorkspace(self._correction_wksp)
Developer: mantidproject, Project: mantid, Lines: 7, Source: CalculateEfficiencyCorrectionTest.py
Example 9: test_setTitle
def test_setTitle(self):
    run_algorithm('CreateWorkspace', OutputWorkspace='ws1', DataX=[1., 2., 3.], DataY=[2., 3.], DataE=[2., 3.], UnitX='TOF')
    ws1 = AnalysisDataService['ws1']
    title = 'test_title'
    ws1.setTitle(title)
    self.assertEquals(title, ws1.getTitle())
    AnalysisDataService.remove(ws1.getName())
Developer: trnielsen, Project: mantid, Lines: 7, Source: MatrixWorkspaceTest.py
Example 10: test_add_raises_error_if_name_exists
def test_add_raises_error_if_name_exists(self):
    data = [1.0, 2.0, 3.0]
    alg = run_algorithm('CreateWorkspace', DataX=data, DataY=data, NSpec=1, UnitX='Wavelength', child=True)
    name = "testws"
    ws = alg.getProperty("OutputWorkspace").value
    AnalysisDataService.addOrReplace(name, ws)
    self.assertRaises(RuntimeError, AnalysisDataService.add, name, ws)
Developer: mantidproject, Project: mantid, Lines: 7, Source: AnalysisDataServiceTest.py
Example 11: test_batch_reduction_on_multiperiod_file
def test_batch_reduction_on_multiperiod_file(self):
    # Arrange
    # Build the data information
    data_builder = get_data_builder(SANSFacility.ISIS)
    data_builder.set_sample_scatter("SANS2D0005512")
    data_info = data_builder.build()

    # Get the rest of the state from the user file
    user_file_director = StateDirectorISIS(data_info)
    user_file_director.set_user_file("MASKSANS2Doptions.091A")
    # Set the reduction mode to LAB
    user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
    state = user_file_director.construct()

    # Act
    states = [state]
    self._run_batch_reduction(states, use_optimizations=False)

    # Assert
    # We only assert that the expected workspaces exist on the ADS
    expected_workspaces = ["5512p1rear_1D_2.0_14.0Phi-45.0_45.0", "5512p2rear_1D_2.0_14.0Phi-45.0_45.0",
                           "5512p3rear_1D_2.0_14.0Phi-45.0_45.0", "5512p4rear_1D_2.0_14.0Phi-45.0_45.0",
                           "5512p5rear_1D_2.0_14.0Phi-45.0_45.0", "5512p6rear_1D_2.0_14.0Phi-45.0_45.0",
                           "5512p7rear_1D_2.0_14.0Phi-45.0_45.0", "5512p8rear_1D_2.0_14.0Phi-45.0_45.0",
                           "5512p9rear_1D_2.0_14.0Phi-45.0_45.0", "5512p10rear_1D_2.0_14.0Phi-45.0_45.0",
                           "5512p11rear_1D_2.0_14.0Phi-45.0_45.0", "5512p12rear_1D_2.0_14.0Phi-45.0_45.0",
                           "5512p13rear_1D_2.0_14.0Phi-45.0_45.0"]
    for element in expected_workspaces:
        self.assertTrue(AnalysisDataService.doesExist(element))

    # Clean up
    for element in expected_workspaces:
        AnalysisDataService.remove(element)
Developer: DanNixon, Project: mantid, Lines: 34, Source: SANSBatchReductionTest.py
Example 12: test_len_increases_when_item_added
def test_len_increases_when_item_added(self):
    wsname = 'ADSTest_test_len_increases_when_item_added'
    current_len = len(AnalysisDataService)
    self._run_createws(wsname)
    self.assertEquals(len(AnalysisDataService), current_len + 1)
    # Remove to clean the test up
    AnalysisDataService.remove(wsname)
Developer: AlistairMills, Project: mantid, Lines: 7, Source: AnalysisDataServiceTest.py
Example 13: test_saveGSS
def test_saveGSS(self):
    """ Test to save a GSAS file to match V-drive
    """
    # Create a test data file and workspace
    binfilename = "testbin.dat"
    self._createBinFile(binfilename)

    datawsname = "TestInputWorkspace"
    self._createDataWorkspace(datawsname)

    # Execute
    alg_test = run_algorithm("SaveVulcanGSS",
                             InputWorkspace=datawsname,
                             BinFilename=binfilename,
                             OutputWorkspace=datawsname + "_rebinned",
                             GSSFilename="tempout.gda")

    self.assertTrue(alg_test.isExecuted())

    # Verify ....
    outputws = AnalysisDataService.retrieve(datawsname + "_rebinned")
    #self.assertEqual(4, tablews.rowCount())

    # Delete the test data file and clean up the ADS
    os.remove(binfilename)
    AnalysisDataService.remove(datawsname)
    AnalysisDataService.remove(datawsname + "_rebinned")

    return
Developer: rosswhitfield, Project: mantid, Lines: 29, Source: SaveVulcanGSSTest.py
Example 14: _createDataWorkspace
def _createDataWorkspace(self, datawsname):
    """ Create data workspace
    """
    import math

    tof0 = 4900.
    delta = 0.001
    numpts = 200

    vecx = []
    vecy = []
    vece = []

    tof = tof0
    for n in range(numpts):
        vecx.append(tof)
        vecy.append(math.sin(tof0))
        vece.append(1.)
        tof = tof * (1 + delta)
    # ENDFOR
    vecx.append(tof)

    dataws = api.CreateWorkspace(DataX=vecx, DataY=vecy, DataE=vece, NSpec=1,
                                 UnitX="TOF")

    # Add to data service
    AnalysisDataService.addOrReplace(datawsname, dataws)

    return dataws
Developer: rosswhitfield, Project: mantid, Lines: 30, Source: SaveVulcanGSSTest.py
Example 15: test_that_can_find_can_reduction_if_it_exists
def test_that_can_find_can_reduction_if_it_exists(self):
    # Arrange
    test_director = TestDirector()
    state = test_director.construct()
    tagged_workspace_names = {None: "test_ws",
                              OutputParts.Count: "test_ws_count",
                              OutputParts.Norm: "test_ws_norm"}
    SANSFunctionsTest._prepare_workspaces(number_of_workspaces=4,
                                          tagged_workspace_names=tagged_workspace_names,
                                          state=state,
                                          reduction_mode=ISISReductionMode.LAB)
    # Act
    workspace, workspace_count, workspace_norm = get_reduced_can_workspace_from_ads(state, output_parts=True,
                                                                                    reduction_mode=ISISReductionMode.LAB)  # noqa

    # Assert
    self.assertTrue(workspace is not None)
    self.assertTrue(workspace.name() == AnalysisDataService.retrieve("test_ws").name())
    self.assertTrue(workspace_count is not None)
    self.assertTrue(workspace_count.name() == AnalysisDataService.retrieve("test_ws_count").name())
    self.assertTrue(workspace_norm is not None)
    self.assertTrue(workspace_norm.name() == AnalysisDataService.retrieve("test_ws_norm").name())

    # Clean up
    SANSFunctionsTest._remove_workspaces()
Developer: mantidproject, Project: mantid, Lines: 25, Source: general_functions_test.py
Example 16: testConvertUnits
def testConvertUnits(self):
    # test whether CorrectTof+ConvertUnits+ConvertToDistribution will give the same result as TOFTOFConvertTOFToDeltaE
    OutputWorkspaceName = "outputws1"
    alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
    self.assertTrue(alg_test.isExecuted())
    wscorr = AnalysisDataService.retrieve(OutputWorkspaceName)

    # convert units, convert to distribution
    alg_cu = run_algorithm("ConvertUnits", InputWorkspace=wscorr, Target='DeltaE', EMode='Direct', EFixed=2.27, OutputWorkspace=OutputWorkspaceName + '_dE')
    ws_dE = AnalysisDataService.retrieve(OutputWorkspaceName + '_dE')
    alg_cd = run_algorithm("ConvertToDistribution", Workspace=ws_dE)

    # create reference data for X axis
    tof1 = 2123.33867005
    dataX = self._input_ws.readX(0) - tof1
    tel = 8189.5 - tof1
    factor = m_n*1e+15/eV
    newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
    # compare
    # self.assertEqual(newX[0], ws_dE.readX(0)[0])
    self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))

    # create reference data for Y axis and compare to the output
    tof = dataX[:-1] + 5.25
    newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
    # compare
    self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))

    run_algorithm("DeleteWorkspace", Workspace=ws_dE)
    run_algorithm("DeleteWorkspace", Workspace=wscorr)
Developer: DanNixon, Project: mantid, Lines: 30, Source: CorrectTOFTest.py
Example 17: test_exportFileNew
def test_exportFileNew(self):
    """ Test to export logs without header file
    """
    # Generate the matrix workspace with some logs
    ws = self.createTestWorkspace()
    AnalysisDataService.addOrReplace("TestMatrixWS", ws)

    # Test algorithm
    alg_test = run_algorithm("ExportExperimentLog",
                             InputWorkspace="TestMatrixWS",
                             OutputFilename="TestRecord001.txt",
                             SampleLogNames=["run_number", "duration", "proton_charge", "proton_charge", "proton_charge"],
                             SampleLogTitles=["RUN", "Duration", "ProtonCharge", "MinPCharge", "MeanPCharge"],
                             SampleLogOperation=[None, None, "sum", "min", "average"],
                             FileMode="new")

    # Validate
    self.assertTrue(alg_test.isExecuted())

    # Locate file
    outfilename = alg_test.getProperty("OutputFilename").value
    try:
        ifile = open(outfilename)
        lines = ifile.readlines()
        ifile.close()
    except IOError as err:
        print("Unable to open file {0}.".format(outfilename))
        self.assertTrue(False)
        return

    # Last line cannot be empty, i.e., before EOF '\n' is not allowed
    lastline = lines[-1]
    self.assertTrue(len(lastline.strip()) > 0)

    # Number of lines
    self.assertEquals(len(lines), 2)

    # Check line
    firstdataline = lines[1]
    terms = firstdataline.strip().split("\t")
    self.assertEquals(len(terms), 5)

    # Get property
    pchargelog = ws.getRun().getProperty("proton_charge").value
    sumpcharge = numpy.sum(pchargelog)
    minpcharge = numpy.min(pchargelog)
    avgpcharge = numpy.average(pchargelog)
    v2 = float(terms[2])
    self.assertAlmostEqual(sumpcharge, v2)
    v3 = float(terms[3])
    self.assertAlmostEqual(minpcharge, v3)
    v4 = float(terms[4])
    self.assertAlmostEqual(avgpcharge, v4)

    # Remove generated files
    os.remove(outfilename)
    AnalysisDataService.remove("TestMatrixWS")

    return
Developer: DanNixon, Project: mantid, Lines: 60, Source: ExportExperimentLogTest.py
Example 18: test_DNSFRSelfCorrection
def test_DNSFRSelfCorrection(self):
    outputWorkspaceName = "DNSFlippingRatioCorrTest_Test4"
    # consider normalization=1.0 as set in self._create_fake_workspace
    dataws_sf = self.__sf_nicrws - self.__sf_bkgrws
    dataws_nsf = self.__nsf_nicrws - self.__nsf_bkgrws
    alg_test = run_algorithm("DNSFlippingRatioCorr", SFDataWorkspace=dataws_sf,
                             NSFDataWorkspace=dataws_nsf, SFNiCrWorkspace=self.__sf_nicrws.getName(),
                             NSFNiCrWorkspace=self.__nsf_nicrws.getName(), SFBkgrWorkspace=self.__sf_bkgrws.getName(),
                             NSFBkgrWorkspace=self.__nsf_bkgrws.getName(), SFOutputWorkspace=outputWorkspaceName+'SF',
                             NSFOutputWorkspace=outputWorkspaceName+'NSF')

    self.assertTrue(alg_test.isExecuted())

    # check whether the data are correct
    ws_sf = AnalysisDataService.retrieve(outputWorkspaceName + 'SF')
    ws_nsf = AnalysisDataService.retrieve(outputWorkspaceName + 'NSF')

    # dimensions
    self.assertEqual(24, ws_sf.getNumberHistograms())
    self.assertEqual(24, ws_nsf.getNumberHistograms())
    self.assertEqual(2, ws_sf.getNumDims())
    self.assertEqual(2, ws_nsf.getNumDims())

    # data array: spin-flip must be zero
    for i in range(24):
        self.assertAlmostEqual(0.0, ws_sf.readY(i)[0])

    # data array: non spin-flip must be nsf - sf^2/nsf
    nsf = np.array(dataws_nsf.extractY())
    sf = np.array(dataws_sf.extractY())
    refdata = nsf + sf
    for i in range(24):
        self.assertAlmostEqual(refdata[i][0], ws_nsf.readY(i)[0])

    run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'SF')
    run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName + 'NSF')
    run_algorithm("DeleteWorkspace", Workspace=dataws_sf)
    run_algorithm("DeleteWorkspace", Workspace=dataws_nsf)

    return
Developer: rosswhitfield, Project: mantid, Lines: 35, Source: DNSFlippingRatioCorrTest.py
Example 19: _run_createws
def _run_createws(self, wsname):
    """
    Run create workspace storing the output in the named workspace
    """
    data = [1.0, 2.0, 3.0]
    alg = run_algorithm('CreateWorkspace', DataX=data, DataY=data, NSpec=1, UnitX='Wavelength', child=True)
    AnalysisDataService.addOrReplace(wsname, alg.getProperty("OutputWorkspace").value)
Developer: AlistairMills, Project: mantid, Lines: 7, Source: AnalysisDataServiceTest.py
Example 20: test_exportFileAppend
def test_exportFileAppend(self):
    """ Test to export logs by appending to an existing record file
    """
    # Generate the matrix workspace with some logs
    ws = self.createTestWorkspace()
    AnalysisDataService.addOrReplace("TestMatrixWS", ws)

    # Test algorithm
    # create a new file
    alg_test = run_algorithm("ExportExperimentLog",
                             InputWorkspace="TestMatrixWS",
                             OutputFilename="TestRecord.txt",
                             SampleLogNames=["run_number", "duration", "proton_charge"],
                             SampleLogTitles=["RUN", "Duration", "ProtonCharge"],
                             SampleLogOperation=[None, None, "sum"],
                             FileMode="new")
    # append
    alg_test = run_algorithm("ExportExperimentLog",
                             InputWorkspace="TestMatrixWS",
                             OutputFilename="TestRecord.txt",
                             SampleLogNames=["run_number", "duration", "proton_charge"],
                             SampleLogTitles=["RUN", "Duration", "ProtonCharge"],
                             SampleLogOperation=[None, None, "sum"],
                             FileMode="fastappend")

    # Validate
    self.assertTrue(alg_test.isExecuted())

    # Locate file
    outfilename = alg_test.getProperty("OutputFilename").value
    try:
        print("Output file is %s. " % outfilename)
        ifile = open(outfilename)
        lines = ifile.readlines()
        ifile.close()
    except IOError as err:
        print("Unable to open file %s. " % outfilename)
        self.assertTrue(False)
        return

    # Last line cannot be empty, i.e., before EOF '\n' is not allowed
    lastline = lines[-1]
    self.assertTrue(len(lastline.strip()) > 0)

    # Number of lines
    self.assertEquals(len(lines), 3)

    # Check line
    firstdataline = lines[1]
    terms = firstdataline.strip().split("\t")
    self.assertEquals(len(terms), 3)

    # Remove generated files
    os.remove(outfilename)
    AnalysisDataService.remove("TestMatrixWS")

    return
Developer: AlistairMills, Project: mantid, Lines: 59, Source: ExportExperimentLogTest.py
Note: the mantid.api.AnalysisDataService examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub/MSDocs. The snippets are excerpted from open-source projects; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.