This article collects typical usage examples of the Java class org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil. If you have been wondering what exactly ConfigurationUtil does, how to use it, or where to find examples of it in action, the hand-picked class code samples below should help.
The ConfigurationUtil class belongs to the org.apache.pig.backend.hadoop.datastorage package. Eighteen code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
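Before the examples, here is a minimal self-contained sketch of the three ConfigurationUtil helpers the snippets below rely on: toConfiguration, toProperties, and mergeConf. The property keys and values are hypothetical, and the round trip shown reflects how the examples below use these methods rather than authoritative documentation:

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;

public class ConfigurationUtilSketch {
    public static void main(String[] args) {
        // Properties -> Configuration: the direction most examples below use
        Properties props = new Properties();
        props.setProperty("pig.temp.dir", "/tmp/pig"); // hypothetical key/value
        Configuration conf = ConfigurationUtil.toConfiguration(props);

        // Configuration -> Properties: the reverse direction (see openDFSFile, Example 18)
        Properties roundTrip = ConfigurationUtil.toProperties(conf);
        System.out.println(roundTrip.getProperty("pig.temp.dir"));

        // mergeConf(target, incoming): entries of 'incoming' end up in 'target'
        Configuration incoming = new Configuration(false);
        incoming.set("mapreduce.job.reduces", "4"); // hypothetical setting
        ConfigurationUtil.mergeConf(conf, incoming);
        System.out.println(conf.get("mapreduce.job.reduces"));
    }
}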
Example 1: fs
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
/**
 * Run a filesystem command. Any output from this command is written to
 * stdout or stderr as appropriate.
 * @param cmd Filesystem command to run along with its arguments as one
 *            string.
 * @throws IOException
 */
public static int fs(String cmd) throws IOException {
    ScriptPigContext ctx = getScriptContext();
    FsShell shell = new FsShell(ConfigurationUtil.toConfiguration(
            ctx.getPigContext().getProperties()));
    int code = -1;
    if (cmd != null) {
        String[] cmdTokens = cmd.split("\\s+");
        if (!cmdTokens[0].startsWith("-")) cmdTokens[0] = "-" + cmdTokens[0];
        try {
            code = shell.run(cmdTokens);
        } catch (Exception e) {
            throw new IOException("Run filesystem command failed", e);
        }
    }
    return code;
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 24 | Source: Pig.java
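A hedged usage sketch for the method above: within an embedded Pig script, where getScriptContext() succeeds, the command string is split on whitespace and a leading dash is added to the verb when missing, so the two calls below are equivalent (the path is hypothetical):

import java.io.IOException;

import org.apache.pig.scripting.Pig;

public class FsUsageDemo {
    public static void run() throws IOException {
        // Both calls execute the equivalent of "hadoop fs -ls /data":
        // "ls /data" is tokenized to {"-ls", "/data"} before FsShell runs it.
        int rc1 = Pig.fs("ls /data");
        int rc2 = Pig.fs("-ls /data");
        System.out.println("exit codes: " + rc1 + ", " + rc2);
    }
}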
Example 2: setParams
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
public void setParams(PigServer pigServer)
{
    mPigServer = pigServer;
    mDfs = mPigServer.getPigContext().getDfs();
    mLfs = mPigServer.getPigContext().getLfs();
    mConf = mPigServer.getPigContext().getProperties();
    shell = new FsShell(ConfigurationUtil.toConfiguration(mConf));
    // TODO: this violates the abstraction layer decoupling between
    // front end and back end and needs to be changed.
    // Right now I am not clear on where the Job Id comes from to tell
    // the back end to kill a given job (mJobClient is used only in
    // processKill)
    HExecutionEngine execEngine = mPigServer.getPigContext().getExecutionEngine();
    mJobConf = execEngine.getJobConf();
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 19 | Source: GruntParser.java
Example 3: initialize
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
        throws IOException, InterruptedException {
    // initialize the underlying actual RecordReader with the right Context
    // object - this is achieved by merging the Context corresponding to
    // the input split this Reader is supposed to process with the context
    // passed in.
    this.pigSplit = (PigSplit) split;
    this.context = context;
    ConfigurationUtil.mergeConf(context.getConfiguration(), inputSpecificConf);
    // Pass loader signature to LoadFunc and to InputFormat through the conf
    PigInputFormat.passLoadSignature(loadfunc, pigSplit.getInputIndex(),
            context.getConfiguration());
    // now invoke initialize() on the underlying RecordReader with
    // the "adjusted" conf
    if (null != curReader) {
        curReader.initialize(pigSplit.getWrappedSplit(), context);
        loadfunc.prepareToRead(curReader, pigSplit);
    }
    if (pigSplit.isMultiInputs() && !pigSplit.disableCounter()) {
        counterName = getMultiInputsCounerName(pigSplit, inputSpecificConf);
    }
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 27 | Source: PigRecordReader.java
Example 4: setLocation
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
/**
 * Before delegating calls to the underlying OutputFormat or OutputCommitter,
 * Pig needs to ensure the Configuration in the JobContext contains the
 * output location and StoreFunc for the specific store - so set these up
 * in the context for this specific store.
 * @param jobContext the {@link JobContext}
 * @param store the POStore
 * @throws IOException on failure
 */
public static void setLocation(JobContext jobContext, POStore store) throws
        IOException {
    Job storeJob = new Job(jobContext.getConfiguration());
    StoreFuncInterface storeFunc = store.getStoreFunc();
    String outputLocation = store.getSFile().getFileName();
    storeFunc.setStoreLocation(outputLocation, storeJob);
    // the setStoreLocation() method would indicate to the StoreFunc
    // to set the output location for its underlying OutputFormat.
    // Typically OutputFormats store the output location in the
    // Configuration - so we need to get the modified Configuration
    // containing the output location (and any other settings the
    // OutputFormat might have set) and merge it with the Configuration
    // we started with, so that when this method returns the Configuration
    // supplied as input has the updates.
    ConfigurationUtil.mergeConf(jobContext.getConfiguration(),
            storeJob.getConfiguration());
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 29 | Source: PigOutputFormat.java
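The long comment above boils down to a copy-mutate-merge pattern. The stand-in below illustrates the behavior this code expects from ConfigurationUtil.mergeConf, namely that every entry of the second Configuration is copied into the first; this is an assumption made for illustration, not Pig's actual implementation:

import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class MergeConfSketch {
    // Illustrative stand-in for ConfigurationUtil.mergeConf(target, incoming):
    // copy each entry of 'incoming' into 'target', overwriting duplicates.
    static void mergeConf(Configuration target, Configuration incoming) {
        for (Map.Entry<String, String> entry : incoming) {
            target.set(entry.getKey(), entry.getValue());
        }
    }

    public static void main(String[] args) {
        Configuration target = new Configuration(false);
        Configuration incoming = new Configuration(false);
        incoming.set("mapreduce.output.fileoutputformat.outputdir", "/out"); // hypothetical
        mergeConf(target, incoming);
        System.out.println(target.get("mapreduce.output.fileoutputformat.outputdir"));
    }
}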
Example 5: testDefaultParallel
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
@Test
public void testDefaultParallel() throws Throwable {
    pc.defaultParallel = 100;

    String query = "a = load 'input';" + "b = group a by $0;" + "store b into 'output';";
    PigServer ps = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
    PhysicalPlan pp = Util.buildPp(ps, query);
    MROperPlan mrPlan = Util.buildMRPlan(pp, pc);

    ConfigurationValidator.validatePigProperties(pc.getProperties());
    Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    JobControlCompiler jcc = new JobControlCompiler(pc, conf);
    JobControl jobControl = jcc.compile(mrPlan, "Test");
    Job job = jobControl.getWaitingJobs().get(0);
    int parallel = job.getJobConf().getNumReduceTasks();

    assertEquals(100, parallel);
    Util.assertParallelValues(100, -1, -1, 100, job.getJobConf());

    pc.defaultParallel = -1;
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 23 | Source: TestJobSubmission.java
Example 6: genNewLoadStorePlan
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
private LogicalPlan genNewLoadStorePlan(String inputFile,
        String outputFile, DataStorage dfs) throws Throwable {
    LogicalPlan plan = new LogicalPlan();
    FileSpec filespec1 =
        new FileSpec(inputFile, new FuncSpec("org.apache.pig.builtin.PigStorage"));
    FileSpec filespec2 =
        new FileSpec(outputFile, new FuncSpec("org.apache.pig.builtin.PigStorage"));
    LOLoad load = newLOLoad(filespec1, null, plan,
            ConfigurationUtil.toConfiguration(dfs.getConfiguration()));
    LOStore store = new LOStore(plan, filespec2,
            (StoreFuncInterface) PigContext.instantiateFuncFromSpec(filespec2.getFuncSpec()), null);

    plan.add(load);
    plan.add(store);
    plan.connect(load, store);

    return plan;
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 20 | Source: TestInputOutputFileValidator.java
Example 7: generateLogicalPlan
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
/**
 * Generate a {@link LogicalPlan} containing Load, Filter and Store
 * operators.
 *
 * @param inputFile  name of the input file
 * @param outputFile name of the output file
 * @param dfs        data storage providing the configuration
 * @return the generated {@link LogicalPlan}
 * @throws Exception
 */
private LogicalPlan generateLogicalPlan(String inputFile,
        String outputFile, DataStorage dfs) throws Exception {
    LogicalPlan plan = new LogicalPlan();
    FileSpec filespec1 = new FileSpec(generateTmpFile(inputFile).getAbsolutePath(),
            new FuncSpec("org.apache.pig.builtin.PigStorage"));
    FileSpec filespec2 = new FileSpec(generateTmpFile(outputFile).getAbsolutePath(),
            new FuncSpec("org.apache.pig.builtin.PigStorage"));
    LOLoad load = newLOLoad(filespec1, null, plan,
            ConfigurationUtil.toConfiguration(dfs.getConfiguration()));
    LOStore store = new LOStore(plan, filespec2,
            (StoreFuncInterface) PigContext.instantiateFuncFromSpec(filespec2.getFuncSpec()), null);
    LOFilter filter = new LOFilter(plan);

    plan.add(load);
    plan.add(store);
    plan.add(filter);
    plan.connect(load, filter);
    plan.connect(filter, store);

    return plan;
}
Developer: sigmoidanalytics | Project: spork | Lines: 31 | Source: TestBlackAndWhitelistValidator.java
Example 8: checkAndCleanup
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
private void checkAndCleanup(ExecType execType, String expectedFileName,
        String inputFileName) throws IOException {
    if (execType == ExecType.MAPREDUCE) {
        FileSystem fs = FileSystem.get(
                ConfigurationUtil.toConfiguration(cluster.getProperties()));
        assertTrue(fs.exists(new Path(expectedFileName)));
        Util.deleteFile(cluster, inputFileName);
        Util.deleteFile(cluster, expectedFileName);
    } else if (execType == ExecType.LOCAL) {
        File f = new File(expectedFileName);
        assertTrue(f.exists());
        f.delete();
    } else {
        throw new IllegalArgumentException("invalid exectype " + execType.toString());
    }
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 18 | Source: TestFinish.java
Example 9: testGroupConstWithParallel
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
/**
 * Test parallelism for group by constant
 * @throws Throwable
 */
@Test
public void testGroupConstWithParallel() throws Throwable {
    PigContext pc = new PigContext(ExecType.MAPREDUCE, cluster.getProperties());
    pc.defaultParallel = 100;
    pc.connect();

    String query = "a = load 'input';\n" + "b = group a by 1;" + "store b into 'output';";
    PigServer pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
    PhysicalPlan pp = Util.buildPp(pigServer, query);
    MROperPlan mrPlan = Util.buildMRPlan(pp, pc);

    ConfigurationValidator.validatePigProperties(pc.getProperties());
    Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    JobControlCompiler jcc = new JobControlCompiler(pc, conf);
    JobControl jobControl = jcc.compile(mrPlan, "Test");
    Job job = jobControl.getWaitingJobs().get(0);
    int parallel = job.getJobConf().getNumReduceTasks();

    // group-by-constant collapses to a single reducer despite defaultParallel
    assertEquals("parallelism", 1, parallel);
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 27 | Source: TestGroupConstParallel.java
Example 10: testGroupNonConstWithParallel
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
/**
 * Test parallelism for group by column
 * @throws Throwable
 */
@Test
public void testGroupNonConstWithParallel() throws Throwable {
    PigContext pc = new PigContext(ExecType.MAPREDUCE, cluster.getProperties());
    pc.defaultParallel = 100;
    pc.connect();

    PigServer pigServer = new PigServer(ExecType.MAPREDUCE, cluster.getProperties());
    String query = "a = load 'input';\n" + "b = group a by $0;" + "store b into 'output';";
    PhysicalPlan pp = Util.buildPp(pigServer, query);
    MROperPlan mrPlan = Util.buildMRPlan(pp, pc);

    ConfigurationValidator.validatePigProperties(pc.getProperties());
    Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    JobControlCompiler jcc = new JobControlCompiler(pc, conf);
    JobControl jobControl = jcc.compile(mrPlan, "Test");
    Job job = jobControl.getWaitingJobs().get(0);
    int parallel = job.getJobConf().getNumReduceTasks();

    // grouping by a real column honors the configured default parallelism
    assertEquals("parallelism", 100, parallel);
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 27 | Source: TestGroupConstParallel.java
Example 11: testBinStorageGetSchema
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
@Test
public void testBinStorageGetSchema() throws IOException, ParserException {
    String[] input = new String[] { "hello\t1\t10.1", "bye\t2\t20.2" };
    String inputFileName = "testGetSchema-input.txt";
    String outputFileName = "testGetSchema-output.txt";
    try {
        Util.createInputFile(pig.getPigContext(), inputFileName, input);
        String query = "a = load '" + inputFileName + "' as (c:chararray, " +
                "i:int,d:double);store a into '" + outputFileName + "' using " +
                "BinStorage();";
        pig.setBatchOn();
        Util.registerMultiLineQuery(pig, query);
        pig.executeBatch();
        ResourceSchema rs = new BinStorage().getSchema(outputFileName,
                new Job(ConfigurationUtil.toConfiguration(
                        pig.getPigContext().getProperties())));
        Schema expectedSchema = Utils.getSchemaFromString(
                "c:chararray,i:int,d:double");
        assertTrue("Checking binstorage getSchema output", Schema.equals(
                expectedSchema, Schema.getPigSchema(rs), true, true));
    } finally {
        Util.deleteFile(pig.getPigContext(), inputFileName);
        Util.deleteFile(pig.getPigContext(), outputFileName);
    }
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 27 | Source: TestStore.java
Example 12: testLFText
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
/**
 * Test {@link TextLoader} - this also tests that {@link TextLoader} is capable
 * of reading data a couple of dirs deep when the input specified is the top
 * level directory.
 */
@Test
public void testLFText() throws Exception {
    String input1 = "This is some text.\nWith a newline in it.\n";
    String expected1 = "This is some text.";
    String expected2 = "With a newline in it.";
    Util.createInputFile(cluster, "testLFTest-input1.txt", new String[] {input1});

    // check that loading the top level dir still reads the file a couple
    // of subdirs below
    LoadFunc text1 = new ReadToEndLoader(new TextLoader(),
            ConfigurationUtil.toConfiguration(cluster.getProperties()),
            "testLFTest-input1.txt", 0);
    Tuple f1 = text1.getNext();
    Tuple f2 = text1.getNext();
    Util.deleteFile(cluster, "testLFTest-input1.txt");
    assertTrue(expected1.equals(f1.get(0).toString()) &&
            expected2.equals(f2.get(0).toString()));

    Util.createInputFile(cluster, "testLFTest-input2.txt", new String[] {});
    LoadFunc text2 = new ReadToEndLoader(new TextLoader(),
            ConfigurationUtil.toConfiguration(cluster.getProperties()),
            "testLFTest-input2.txt", 0);
    Tuple f3 = text2.getNext();
    Util.deleteFile(cluster, "testLFTest-input2.txt");
    assertTrue(f3 == null);
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 30 | Source: TestBuiltin.java
Example 13: EmptyPigStats
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
public EmptyPigStats(PigContext pigContext, POStore poStore) {
    super.pigContext = pigContext;
    super.startTime = super.endTime = System.currentTimeMillis();
    super.userId = System.getProperty("user.name");

    Configuration conf = ConfigurationUtil.toConfiguration(pigContext.getProperties());

    // initialize empty stats
    OutputStats os = new OutputStats(null, -1, -1, true);
    os.setConf(conf);
    os.setPOStore(poStore);
    this.outputStatsList = Collections.unmodifiableList(Arrays.asList(os));

    InputStats is = new InputStats(null, -1, -1, true);
    is.setConf(conf);
    this.inputStatsList = Collections.unmodifiableList(Arrays.asList(is));
}
Developer: sigmoidanalytics | Project: spork | Lines: 18 | Source: EmptyPigStats.java
Example 14: initRightLoader
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
private void initRightLoader(int[] splitsToBeRead) throws IOException {
    PigContext pc = (PigContext) ObjectSerializer
            .deserialize(PigMapReduce.sJobConfInternal.get().get("pig.pigContext"));
    Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());

    // Hadoop security needs this property to be set
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
        conf.set(MRConfiguration.JOB_CREDENTIALS_BINARY,
                System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }

    // create a ReadToEndLoader that will read the given splits in order
    loader = new ReadToEndLoader(
            (LoadFunc) PigContext.instantiateFuncFromSpec(rightLoaderFuncSpec),
            conf, inpLocation, splitsToBeRead);
}
Developer: sigmoidanalytics | Project: spork | Lines: 17 | Source: DefaultIndexableLoader.java
Example 15: testPigTempDir
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
@Test
public void testPigTempDir() throws Throwable {
    Properties properties = PropertiesUtil.loadDefaultProperties();
    File pigTempDir = new File(tempDir, FILE_SEPARATOR + "tmp" + FILE_SEPARATOR + "test");
    properties.put("pig.temp.dir", pigTempDir.getPath());
    PigContext pigContext = new PigContext(ExecType.LOCAL, properties);
    pigContext.connect();
    FileLocalizer.setInitialized(false);

    String tempPath = FileLocalizer.getTemporaryPath(pigContext).toString();
    Path path = new Path(tempPath);
    assertTrue(tempPath.startsWith(pigTempDir.toURI().toString()));

    FileSystem fs = FileSystem.get(path.toUri(),
            ConfigurationUtil.toConfiguration(pigContext.getProperties()));
    FileStatus status = fs.getFileStatus(path.getParent());
    // the temporary root dir should have permission 700
    assertEquals("rwx------", status.getPermission().toString());

    pigTempDir.delete();
    FileLocalizer.setInitialized(false);
}
Developer: sigmoidanalytics | Project: spork | Lines: 22 | Source: TestPigServer.java
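The pattern used in this test, building a Hadoop FileSystem from Pig's properties via toConfiguration, is useful on its own. A minimal hedged sketch (the path argument is hypothetical, and a connected PigContext is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil;
import org.apache.pig.impl.PigContext;

public class FsFromPigContext {
    // Resolve the FileSystem for a path using the Pig context's properties.
    static void stat(PigContext ctx, String p) throws Exception {
        Path path = new Path(p);
        Configuration conf = ConfigurationUtil.toConfiguration(ctx.getProperties());
        FileSystem fs = FileSystem.get(path.toUri(), conf);
        FileStatus status = fs.getFileStatus(path);
        System.out.println(path + " permissions: " + status.getPermission());
    }
}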
Example 16: makeListener
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
protected static PigProgressNotificationListener makeListener(Properties properties) {
    try {
        return PigContext.instantiateObjectFromParams(
                ConfigurationUtil.toConfiguration(properties),
                PROGRESS_NOTIFICATION_LISTENER_KEY,
                PROGRESS_NOTIFICATION_LISTENER_ARG_KEY,
                PigProgressNotificationListener.class);
    } catch (ExecException e) {
        throw new RuntimeException(e);
    }
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 13 | Source: Main.java
Example 17: load
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
public DataBag load(LoadFunc lfunc, PigContext pigContext) throws IOException {
    DataBag content = BagFactory.getInstance().newDefaultBag();
    ReadToEndLoader loader = new ReadToEndLoader(lfunc,
            ConfigurationUtil.toConfiguration(pigContext.getProperties()), file, 0);
    Tuple f = null;
    while ((f = loader.getNext()) != null) {
        content.add(f);
    }
    return content;
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 11 | Source: PigFile.java
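A hedged usage sketch for the load method above. PigFile is the class from the source reference (PigFile.java, under org.apache.pig.impl.io); the input path and the choice of PigStorage are hypothetical, and a connected PigContext is assumed:

import org.apache.pig.builtin.PigStorage;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.Tuple;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.io.PigFile;

public class PigFileLoadDemo {
    // Materializes every tuple of the file into memory - reasonable for small
    // side inputs, since the whole file ends up in one DataBag.
    static void dump(PigContext ctx) throws Exception {
        DataBag bag = new PigFile("/tmp/input.txt").load(new PigStorage(), ctx);
        for (Tuple t : bag) {
            System.out.println(t);
        }
    }
}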
Example 18: openDFSFile
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; // import the required package/class
/**
 * This function is meant to be used if the mappers/reducers want to access
 * any HDFS file.
 * @param fileName path of the file to open
 * @return InputStream of the open file
 * @throws IOException
 */
public static InputStream openDFSFile(String fileName) throws IOException {
    Configuration conf = PigMapReduce.sJobConfInternal.get();
    if (conf == null) {
        throw new RuntimeException(
                "can't open DFS file while executing locally");
    }
    return openDFSFile(fileName, ConfigurationUtil.toProperties(conf));
}
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 17 | Source: FileLocalizer.java
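A short usage sketch for openDFSFile. The lookup-file path is hypothetical, and, as the snippet itself enforces, this only works inside a running map or reduce task where PigMapReduce's job configuration has been set:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.pig.impl.io.FileLocalizer;

public class SideFileDemo {
    static void readLookupFile() throws IOException {
        // Stream an HDFS side file from within a mapper or reducer.
        InputStream in = FileLocalizer.openDFSFile("/data/lookup.txt");
        try (BufferedReader reader =
                new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line); // process each lookup line
            }
        }
    }
}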
Note: the org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil examples in this article are collected from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with them. Consult the corresponding project's License before distributing or using the code; do not reproduce without permission.