Scala Configuration Class Code Examples


This article collects and summarizes typical usage examples of org.apache.hadoop.conf.Configuration in Scala. If you are unsure what the Scala Configuration class does, how to use it, or what real-world usage looks like, the curated class code examples below should help.



Twenty code examples of the Configuration class are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Scala code samples.

Example 1: HdfsClientConf

// Package declaration and imports
package pub.ayada.scala.hdfsutils

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem

class HdfsClientConf private (val coreStiteXMLPath: String, val hdfsStiteXMLPath: String) {
    private val conf = new Configuration();
    conf.addResource(new Path(coreStiteXMLPath));
    conf.addResource(new Path(hdfsStiteXMLPath));

    def getHdfsClientConf(): Configuration = conf

    def getHdpFileSystem(): FileSystem = FileSystem.get(conf);
}
object HdfsClientConf {

    private var instance: HdfsClientConf = null

    def getOneTimeInstance(coreStiteXMLPath: String, hdfsStiteXMLPath: String): Configuration = {
        new HdfsClientConf(coreStiteXMLPath, hdfsStiteXMLPath).getHdfsClientConf()
    }
    def setSingltonInstance(coreStiteXMLPath: String, hdfsStiteXMLPath: String): Configuration = {
        if (instance == null)
            instance = new HdfsClientConf(coreStiteXMLPath, hdfsStiteXMLPath)

        instance.getHdfsClientConf()
    }
    def getSingletonInstance(): HdfsClientConf = {
        if (instance == null)
            throw new NullPointerException("Instantiate HdfsClientConf before retrieving")

        instance
    }
} 
Author: k-ayada | Project: HdfsUtils | Lines: 36 | Source: HdfsClientConf.scala
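
A minimal usage sketch for the singleton variant (the core-site.xml/hdfs-site.xml paths below are hypothetical): initialize once, then obtain a FileSystem handle wherever needed.

// Hypothetical Hadoop client config paths; adjust to the target cluster.
HdfsClientConf.setSingltonInstance("/etc/hadoop/conf/core-site.xml", "/etc/hadoop/conf/hdfs-site.xml")
val fs = HdfsClientConf.getSingletonInstance().getHdpFileSystem()
println(fs.exists(new org.apache.hadoop.fs.Path("/tmp")))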


Example 2: HDFS

// Package declaration and imports
package org.mireynol.util

import java.io.{BufferedInputStream, OutputStreamWriter}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.mutable.ListBuffer
import scala.io.Source

object HDFS {

  def log : Logger = LoggerFactory.getLogger( HDFS.getClass )

  val hadoop : FileSystem = {
    val conf = new Configuration( )
    conf.set( "fs.defaultFS", "hdfs://localhost:9000" )
    FileSystem.get( conf )
  }

  def readAndMap( path : String, mapper : ( String ) => Unit ) = {
    if ( hadoop.exists( new Path( path ) ) ) {
      val is = new BufferedInputStream( hadoop.open( new Path( path ) ) )
      Source.fromInputStream( is ).getLines( ).foreach( mapper )
    }
    else {
      // TODO - error logic here
    }
  }

  def write( filename : String, content : Iterator[ String ] ) = {
    val path = new Path( filename )
    val out = new OutputStreamWriter( hadoop.create( path, false ) )
    content.foreach( str => out.write( str + "\n" ) )
    out.flush( )
    out.close( )
  }

  def ls( path : String ) : List[ String ] = {
    val files = hadoop.listFiles( new Path( path ), false )
    val filenames = ListBuffer[ String ]( )
    while ( files.hasNext ) filenames += files.next( ).getPath( ).toString( )
    filenames.toList
  }

  def rm( path : String, recursive : Boolean ) : Unit = {
    if ( hadoop.exists( new Path( path ) ) ) {
      println( "deleting file : " + path )
      hadoop.delete( new Path( path ), recursive )
    }
    else {
      println( "File/Directory" + path + " does not exist" )
      log.warn( "File/Directory" + path + " does not exist" )
    }
  }

  def cat( path : String ) = Source.fromInputStream( hadoop.open( new Path( path ) ) ).getLines( ).foreach( println )

} 
Author: reynoldsm88 | Project: spark-drools | Lines: 61 | Source: HDFS.scala
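
A brief usage sketch of the helper above (paths are hypothetical, and the hard-coded hdfs://localhost:9000 NameNode must be reachable):

HDFS.write("/tmp/example.txt", Iterator("line one", "line two")) // create and fill a file
HDFS.readAndMap("/tmp/example.txt", line => println(line))       // stream it back line by line
HDFS.ls("/tmp").foreach(println)                                  // list directory contents
HDFS.rm("/tmp/example.txt", recursive = false)                    // clean up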


Example 3: HTableStage

// Package declaration and imports
package akka.stream.alpakka.hbase.javadsl

import akka.stream.alpakka.hbase.HTableSettings
import akka.stream.alpakka.hbase.internal.HBaseFlowStage
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.{Done, NotUsed}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put

import scala.collection.immutable
import scala.concurrent.Future

object HTableStage {

  def table[T](conf: Configuration,
               tableName: TableName,
               columnFamilies: java.util.List[String],
               converter: java.util.function.Function[T, Put]): HTableSettings[T] = {
    import scala.compat.java8.FunctionConverters._
    import scala.collection.JavaConverters._
    HTableSettings(conf, tableName, immutable.Seq(columnFamilies.asScala: _*), asScalaFromFunction(converter))
  }

  def sink[A](config: HTableSettings[A]): akka.stream.javadsl.Sink[A, Future[Done]] =
    Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).asJava

  def flow[A](settings: HTableSettings[A]): akka.stream.javadsl.Flow[A, A, NotUsed] =
    Flow.fromGraph(new HBaseFlowStage[A](settings)).asJava

} 
Author: akka | Project: alpakka | Lines: 32 | Source: HTableStage.scala


Example 4: list

// Package declaration and imports
package uk.co.odinconsultants.bitcoin.integration.hadoop

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hdfs.DistributedFileSystem
import uk.co.odinconsultants.bitcoin.core.Logging
import uk.co.odinconsultants.bitcoin.integration.hadoop.HadoopForTesting.hdfsCluster

import scala.collection.mutable.ArrayBuffer

trait MiniHadoopClusterRunning extends Logging {

  val distributedFS: DistributedFileSystem  = hdfsCluster.getFileSystem
  val conf: Configuration                   = HadoopForTesting.conf
  val dir                                   = s"/${this.getClass.getSimpleName}/"

  def list(path: String): List[Path] = {
    info(s"Looking in $path")

    val files = distributedFS.listFiles(new Path(path), true)

    val allPaths = ArrayBuffer[Path]()
    while (files.hasNext) {
      val file = files.next
      allPaths += file.getPath
    }

    allPaths.toList
  }

  def copyToHdfs(inputFile: Path): Path = {
    val fromFile  = inputFile.getName
    distributedFS.mkdirs(new Path(dir))
    val toFile    = new Path(dir + fromFile)
    info(s"Copying '$fromFile' to '$toFile' (${toFile.getName})")
    distributedFS.copyFromLocalFile(false, true, inputFile, toFile)
    toFile
  }

  def localFile(local: String): Path = {
    val classLoader = getClass.getClassLoader
    val localFQN    = classLoader.getResource(local).getFile
    new Path(localFQN)
  }
} 
Author: PhillHenry | Project: Cryptorigin | Lines: 46 | Source: MiniHadoopClusterRunning.scala


Example 5: BenchmarkYarnPrepare

// Package declaration and imports
package com.microsoft.spark.perf

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object BenchmarkYarnPrepare {

  private def uploadFile(localPath: String, remoteWorkingDir: String): Unit = {
    new Path(remoteWorkingDir).getFileSystem(new Configuration()).
      copyFromLocalFile(new Path(localPath), new Path(remoteWorkingDir))
  }

  private def createRemoteWorkingDir(
      remoteWorkingDir: String,
      localJarPath: String,
      sparkSubmitParamsPath: String,
      benchmarkParamsPath: String): Unit = {
    uploadFile(localJarPath, remoteWorkingDir + "/spark-benchmark.jar")
    uploadFile(sparkSubmitParamsPath, remoteWorkingDir + "/spark.conf")
    uploadFile(benchmarkParamsPath, remoteWorkingDir + "/benchmark.conf")
  }

  def main(args: Array[String]): Unit = {
    val remoteWorkingDir = args(0)
    val localJarPath = args(1)
    val sparkSubmitParamsFilePath = args(2)
    val benchmarkParamsFilePath = args(3)

    createRemoteWorkingDir(remoteWorkingDir, localJarPath, sparkSubmitParamsFilePath,
      benchmarkParamsFilePath)
  }
} 
Author: hdinsight | Project: SparkPerf | Lines: 33 | Source: BenchmarkYarnPrepare.scala
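
For reference, a hedged sketch of the expected invocation (all four paths are hypothetical), matching the positional arguments read in main:

BenchmarkYarnPrepare.main(Array(
  "hdfs://namenode:8020/benchmarks/run1", // remote working directory
  "/local/path/spark-benchmark.jar",      // local benchmark jar
  "/local/path/spark.conf",               // spark-submit parameters file
  "/local/path/benchmark.conf"            // benchmark parameters file
))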


Example 6: FileUtils

// Package declaration and imports
package com.asto.dmp.xxx.util

import com.asto.dmp.xxx.base.Constants
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD


object FileUtils extends Logging {
  private val conf = new Configuration()
  conf.set("fs.defaultFS", Constants.Hadoop.DEFAULT_FS)
  conf.set("mapreduce.jobtracker.address", Constants.Hadoop.JOBTRACKER_ADDRESS)

  def deleteFilesInHDFS(paths: String*) = {
    paths.foreach { path =>
      val filePath = new Path(path)
      val HDFSFilesSystem = filePath.getFileSystem(new Configuration())
      if (HDFSFilesSystem.exists(filePath)) {
        logInfo(s"?????$filePath")
        HDFSFilesSystem.delete(filePath, true)
      }
    }
  }

  def saveAsTextFile[T <: Product](rdd: RDD[T], savePath: String) = {
    deleteFilesInHDFS(savePath)
    logInfo(s"?${savePath}?????")
    rdd.map(_.productIterator.mkString(Constants.OutputPath.SEPARATOR)).coalesce(1).saveAsTextFile(savePath)
  }

  def saveAsTextFile(text: String, savePath: String) = {
    deleteFilesInHDFS(savePath)
    logInfo(s"?${savePath}?????")
    val out = FileSystem.get(conf).create(new Path(savePath))
    out.write(text.getBytes)
    out.flush()
    out.close()
  }
} 
Author: zj-lingxin | Project: asto-sparksql | Lines: 42 | Source: FileUtils.scala


Example 7: HTableSettings

// Package declaration and imports
package akka.stream.alpakka.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put

import scala.collection.immutable

final case class HTableSettings[T](conf: Configuration,
                                   tableName: TableName,
                                   columnFamilies: immutable.Seq[String],
                                   converter: T => Put)

object HTableSettings {
  def create[T](conf: Configuration,
                tableName: TableName,
                columnFamilies: java.util.List[String],
                converter: java.util.function.Function[T, Put]): HTableSettings[T] = {
    import scala.compat.java8.FunctionConverters._
    import scala.collection.JavaConverters._
    HTableSettings(conf, tableName, immutable.Seq(columnFamilies.asScala: _*), asScalaFromFunction(converter))
  }
} 
Author: akka | Project: alpakka | Lines: 24 | Source: HBaseSettings.scala
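
Because HTableSettings is a plain case class, Scala callers can also construct it directly instead of going through the Java-friendly create helper. A minimal sketch, assuming a made-up Person type, table name, and column family:

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.immutable

case class Person(id: String, name: String)

val settings: HTableSettings[Person] = HTableSettings(
  HBaseConfiguration.create(),
  TableName.valueOf("person"),
  immutable.Seq("info"),
  // Convert each Person into an HBase Put keyed by id.
  (p: Person) => new Put(Bytes.toBytes(p.id))
    .addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(p.name))
)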


Example 8: HBaseGlobalValues

// Package declaration and imports
package com.hadooparchitecturebook.taxi360.server.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory}

object HBaseGlobalValues {
  var appEventTableName = "app-event"
  var numberOfSalts = 10000
  var connection:Connection = null

  def init(conf:Configuration, numberOfSalts:Int,
           appEventTableName:String): Unit = {
    connection = ConnectionFactory.createConnection(conf)
    this.numberOfSalts = numberOfSalts
    this.appEventTableName = appEventTableName
  }
} 
Author: hadooparchitecturebook | Project: Taxi360 | Lines: 18 | Source: HBaseGlobalValues.scala
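
A hedged initialization sketch (the salt count and table name simply repeat the defaults above; using HBaseConfiguration.create() as the configuration source is an assumption):

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}

HBaseGlobalValues.init(HBaseConfiguration.create(), 10000, "app-event")
// Afterwards the shared connection can be used anywhere in the server.
val appEventTable = HBaseGlobalValues.connection.getTable(TableName.valueOf(HBaseGlobalValues.appEventTableName))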


Example 9: Converter

// Package declaration and imports
package com.dataoptimo.imgprocessing.convert
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.Text
import org.apache.hadoop.io.BytesWritable
import org.apache.hadoop.fs.{Path, FileSystem}
import org.apache.hadoop.io.IOUtils
import org.apache.hadoop.io.SequenceFile
import java.io.IOException
import java.lang.IllegalArgumentException

class Converter(conf: Configuration) {

  def imageToSequence(srcPath: String, dstPath: String) {
    try {
      val fs = FileSystem.get(conf)
      val inPath = new Path(srcPath)
      val outPath = new Path(dstPath)
      val key = new Text()
      val value = new BytesWritable()
      val in = fs.open(inPath)
      // Read the whole image into memory before appending it to the sequence file.
      val buffer = new Array[Byte](in.available())
      in.read(buffer)
      val writer = SequenceFile.createWriter(fs, conf, outPath, key.getClass(), value.getClass())
      writer.append(new Text(inPath.getName()), new BytesWritable(buffer))
      IOUtils.closeStream(writer)
      IOUtils.closeStream(in)
    }
    catch {
      case io: IOException => println(io.getMessage)
      case illegalArgument: IllegalArgumentException => println(illegalArgument.getMessage)
    }
  }
}
Author: mfawadalam | Project: imgprocessing | Lines: 36 | Source: Converters.scala
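
Usage sketch (both paths are hypothetical; the Configuration should point at the target cluster):

import org.apache.hadoop.conf.Configuration

val converter = new Converter(new Configuration())
converter.imageToSequence("/data/images/cat.jpg", "/data/sequences/cat.seq")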


Example 10: ParquetUtils

// Package declaration and imports
package com.scalagen.util

import scala.collection.JavaConverters._

import com.scalagen.data._
import com.scalagen.data.api.Source

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.parquet.hadoop.ParquetFileReader
import org.apache.parquet.schema.OriginalType._
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName._
import org.apache.parquet.schema._
import org.slf4j.{Logger, LoggerFactory}


object ParquetUtils {
  private val logger: Logger = LoggerFactory.getLogger(getClass)

  private[scalagen] def makeSchema(s: String, sources: Seq[Source[_, _]], headers: Seq[String]): MessageType = {
    logger.debug(s"Making schema for ${sources.mkString(", ")}")
    val sourceTypes: Seq[Type] = sources.zip(headers).map {
      case (s: Source[_, _], n: String) => sourceToParquetType(s, n)
      case _                            => throw new IllegalArgumentException("Bad input for parquet source types.")
    }
    new MessageType(s, sourceTypes: _*)
  }

  private[scalagen] def sourceToParquetType(s: Source[_, _], columnName: String): Type = {
    s match {
      case _: GaussianSource | _: RandomDouble                            => Types.required(DOUBLE).named(columnName)
      case _: IncrementingSource | _: DeincrementingSource | _: RandomInt => Types.required(INT32).named(columnName)
      case _: DateSource                                                  => Types.required(BINARY).as(UTF8).named(columnName)
      case _: BernoulliSource                                             => Types.required(BOOLEAN).named(columnName)
      case _                                                              => Types.required(BINARY).as(UTF8).named(columnName)
    }
  }

  def parquetRowCount(s: String): Long = {
    parquetRowCount(new Path(s))
  }

  def parquetRowCount(p: Path, conf: Configuration = new Configuration()): Long = {
    val fs: FileSystem     = p.getFileSystem(conf)
    val status: FileStatus = fs.getFileStatus(p)
    ParquetFileReader.readFooters(conf, status, false).asScala.head.getParquetMetadata.getBlocks.asScala.map(_.getRowCount).sum
  }
} 
Author: hntd187 | Project: scalagen | Lines: 49 | Source: ParquetUtils.scala
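
The row-count helper can be used on its own; a small sketch with a hypothetical Parquet file path:

val rows: Long = ParquetUtils.parquetRowCount("/data/output/part-00000.parquet")
println(s"row count: $rows")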


Example 11: HdfsWriteTest

// Package declaration and imports
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}
import org.scalatest._

class HdfsWriteTest extends FlatSpec with Matchers {
  "Hello" should "have tests" in {

    def write(uri: String, filePath: String, data: Array[Byte]) = {
//      System.setProperty("HADOOP_USER_NAME", "Mariusz")
      val path = new Path(filePath)
      val conf = new Configuration()
      conf.set("fs.defaultFS", uri)
      val fs = FileSystem.get(conf)
      val os = fs.create(path)
      os.write(data)
      os.close()
      fs.close()
    }

    write("hdfs://0.0.0.0:19000", "hdfs://0.0.0.0:19000/user/cloudera/test.txt", "Hello World".getBytes)


  }
} 
Author: ralreiroe | Project: embarcadero | Lines: 24 | Source: HdfsWriteTest.scala


Example 12: item

// Package declaration and imports
package mesosphere.marathon.io.storage

import java.io.{ File, FileInputStream, InputStream, OutputStream }
import java.net.URI

import org.apache.hadoop.conf.Configuration

import mesosphere.chaos.http.HttpConf
import mesosphere.marathon.MarathonConf
import mesosphere.marathon.io.IO


trait StorageProvider {
  def item(path: String): StorageItem
}

object StorageProvider {
  val HDFS = "^(hdfs://[^/]+)(.*)$".r // hdfs://host:port/path
  val FILE = "^file://(.*)$".r // file:///local/artifact/path

  @SuppressWarnings(Array("OptionGet"))
  def provider(config: MarathonConf, http: HttpConf): StorageProvider =
    config.artifactStore.get.getOrElse("") match {
      case HDFS(uri, base) =>
        new HDFSStorageProvider(
          new URI(uri),
          if (base.isEmpty) "/" else base,
          new Configuration()
        )

      case FILE(base) =>
        new FileStorageProvider(
          s"http://${config.hostname.get.get}:${http.httpPort.get.get}/v2/artifacts",
          new File(base)
        )

      case _ =>
        new NoStorageProvider()
    }

  def isValidUrl(url: String): Boolean = url match {
    case HDFS(_, _) => true
    case FILE(_) => true
    case _ => false
  }

  def examples: Map[String, String] = Map (
    "hdfs" -> "hdfs://localhost:54310/path/to/store",
    "file" -> "file:///var/log/store"
  )
} 
Author: xiaozai512 | Project: marathon | Lines: 52 | Source: Storage.scala
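
The URL validation can be exercised directly against the documented example URLs; a small sketch:

StorageProvider.examples.foreach { case (scheme, url) =>
  println(s"$scheme: $url -> ${StorageProvider.isValidUrl(url)}") // both print true
}
println(StorageProvider.isValidUrl("ftp://example.com/store")) // false: neither hdfs:// nor file://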


Example 13: HBaseGlobalValues

// Package declaration and imports
package com.cloudera.sa.apptrans.server.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory}

object HBaseGlobalValues {
  var appEventTableName = "app-event"
  var accountMartTableName = "account-mart"
  var numberOfSalts = 10000
  var connection:Connection = null

  def init(conf:Configuration, numberOfSalts:Int,
           appEventTableName:String,
           accountMartTableName:String): Unit = {
    connection = ConnectionFactory.createConnection(conf)
    this.numberOfSalts = numberOfSalts
    this.appEventTableName = appEventTableName
    this.accountMartTableName = accountMartTableName
  }
} 
Author: tmalaska | Project: AppTrans | Lines: 21 | Source: HBaseGlobalValues.scala


Example 14: ConvertsSpec

// Package declaration and imports
package com.newegg.eims.DataPorter.HDFS

import java.io.File

import com.newegg.eims.DataPorter.HDFS.Converts._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.security.UserGroupInformation
import org.scalatest.{FlatSpec, Matchers}


class ConvertsSpec extends FlatSpec with Matchers {
  val currentDir = new File(".").getCanonicalPath + File.separator + "target" + File.separator

  "hdfs" should "can do with hadoop user" in {
    var test = 1
    new File(currentDir + "test.txt").doAs(UserGroupInformation.createRemoteUser("vq83"), f => test = 2)
    test should be(2)
  }

  it should "no copy file when not exists file path" in {
    new File(currentDir + "test.txt").copyToHDFS(new Path(currentDir + "test1.txt"), new Configuration())
    new File(currentDir + "test1.txt").exists() shouldBe false
  }

  it should "copy file when exists file path" in {
    val f = new File(currentDir + "test.txt")
    f.createNewFile()
    f.exists() shouldBe true
    new File(currentDir + "test.txt").copyToHDFS(new Path(currentDir + "test1.txt"), new Configuration())
    val f2 = new File(currentDir + "test1.txt")
    f2.exists() shouldBe true
    f.delete() shouldBe true
    f2.delete() shouldBe true
  }
} 
Author: CodeBabyBear | Project: DataPorter | Lines: 37 | Source: ConvertsSpec.scala


Example 15: ParquetReadSupport

// Package declaration and imports
package com.newegg.eims.DataPorter.Parquet

import java.util

import com.newegg.eims.DataPorter.Base.DataSetSchema
import org.apache.hadoop.conf.Configuration
import parquet.hadoop.api.ReadSupport
import parquet.hadoop.api.ReadSupport.ReadContext
import parquet.io.api.RecordMaterializer
import parquet.schema.MessageType


class ParquetReadSupport extends ReadSupport[ParquetDataRow] {
  private var schema: DataSetSchema = _

  def setSchema(dataSchema: DataSetSchema): Unit = schema = dataSchema

  override def prepareForRead(configuration: Configuration, keyValueMetaData: util.Map[String, String],
                              fileSchema: MessageType, readContext: ReadContext): RecordMaterializer[ParquetDataRow] = {
    new ParquetRecordMaterializer(fileSchema, schema, new ParquetSchemaConverter(configuration))
  }

  override def init(configuration: Configuration, keyValueMetaData: util.Map[String, String],
                    fileSchema: MessageType): ReadContext = {
    new ReadContext(fileSchema, keyValueMetaData)
  }
} 
Author: CodeBabyBear | Project: DataPorter | Lines: 28 | Source: ParquetReadSupport.scala


Example 16: ParquetOutputWriter

// Package declaration and imports
package com.newegg.eims.DataPorter.Parquet

import com.newegg.eims.DataPorter.Base.{DataSetSchema, IDataRow}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.{JobConf, TaskAttemptContextImpl}
import org.apache.hadoop.mapreduce.TaskAttemptContext
import parquet.hadoop.ParquetOutputFormat
import parquet.hadoop.api.WriteSupport


class ParquetOutputWriter(dataSetSchema: DataSetSchema, path: Path, conf: JobConf) {

  class IDataRowParquetOutputFormat(support: ParquetWriteSupport, filePath: Path) extends ParquetOutputFormat[IDataRow]() {
    override def getWriteSupport(configuration: Configuration): WriteSupport[IDataRow] = {
      support
    }

    override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = {
      filePath
    }
  }

  private val context = new TaskAttemptContextImpl(conf, new org.apache.hadoop.mapred.TaskAttemptID())
  private val formatter = {
    val support = new ParquetOutputFormat[IDataRow]().getWriteSupport(conf).asInstanceOf[ParquetWriteSupport]
    support.setSchema(dataSetSchema)
    new IDataRowParquetOutputFormat(support, path)
  }

  private val recordWriter = formatter.getRecordWriter(context)

  def write(row: IDataRow): Unit = recordWriter.write(null, row)

  def close(): Unit = recordWriter.close(context)
} 
Author: CodeBabyBear | Project: DataPorter | Lines: 37 | Source: ParquetOutputWriter.scala


Example 17: SessionDataFileHDFSWriter

// Package declaration and imports
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.BufferedWriter
import java.io.FileWriter
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.conf.Configuration
import java.io.OutputStreamWriter
import org.apache.hadoop.fs.Path
import java.util.Random

object SessionDataFileHDFSWriter {
  
  val eol = System.getProperty("line.separator");  
  
  def main(args: Array[String]) {
    if (args.length == 0) {
        println("SessionDataFileWriter {tempDir} {distDir} {numberOfFiles} {numberOfEventsPerFile} {waitBetweenFiles}");
        return;
    }
    val conf = new Configuration
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"))
    
    val fs = FileSystem.get(new Configuration)
    val rootTempDir = args(0)
    val rootDistDir = args(1)
    val files = args(2).toInt
    val loops = args(3).toInt
    val waitBetweenFiles = args(4).toInt
    val r = new Random
    for (f <- 1 to files) {
      val rootName = "/weblog." + System.currentTimeMillis()
      val tmpPath = new Path(rootTempDir + rootName + ".tmp")
      val writer = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath)))
      
      print(f + ": [")
      
      val randomLoops = loops + r.nextInt(loops)
      
      for (i <- 1 to randomLoops) {
        writer.write(SessionDataGenerator.getNextEvent + eol)
        if (i%100 == 0) {
          print(".")
        }
      }
      println("]")
      writer.close
      
      val distPath = new Path(rootDistDir + rootName + ".dat")
      
      fs.rename(tmpPath, distPath)
      Thread.sleep(waitBetweenFiles)
    }
    println("Done")
  }
} 
Author: TedBear42 | Project: spark_training | Lines: 58 | Source: SessionDataFileHDFSWriter.scala
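
A hypothetical invocation (directories and counts are illustrative; the /etc/hadoop/conf files referenced above must exist on the machine running it):

SessionDataFileHDFSWriter.main(Array(
  "/tmp/weblogs",  // tempDir
  "/data/weblogs", // distDir
  "5",             // numberOfFiles
  "1000",          // numberOfEventsPerFile
  "2000"           // waitBetweenFiles, in milliseconds
))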


Example 18: XDSessionCatalog

// Package declaration and imports
package org.apache.spark.sql.crossdata.catalyst.catalog

import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.catalyst.CatalystConf
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry
import org.apache.spark.sql.catalyst.catalog.{ExternalCatalog, FunctionResourceLoader, GlobalTempViewManager, SessionCatalog}

import org.apache.spark.sql.crossdata.catalyst.catalog.temporary.XDTemporaryCatalog

class XDSessionCatalog(
                        val temporaryCatalog: XDTemporaryCatalog,
                        externalCatalog: ExternalCatalog,
                        globalTempViewManager: GlobalTempViewManager,
                        functionResourceLoader: FunctionResourceLoader,
                        functionRegistry: FunctionRegistry,
                        conf: CatalystConf,
                        hadoopConf: Configuration) extends SessionCatalog(
  externalCatalog,
  globalTempViewManager,
  functionResourceLoader,
  functionRegistry,
  conf,
  hadoopConf
) 
Author: nagyistge | Project: crossdata-spark2 | Lines: 25 | Source: XDSessionCatalog.scala


Example 19: sparkContext

// Package declaration and imports
package org.bdgenomics.adam.rdd

import org.apache.spark.SparkContext
import org.bdgenomics.adam.rdd.feature.FeatureRDD
import org.bdgenomics.adam.rdd.ADAMContext._
import comp.bio.aging.playground.extensions._

trait HDFSFilesExtensions {

  def sparkContext: SparkContext

  def openFolder(where: String): (List[String], List[String]) = {
    import  org.apache.hadoop.fs._
    val hadoopConfig = sparkContext.hadoopConfiguration
    val fs = FileSystem.get( hadoopConfig )
    val pathes = fs.listStatus(new Path(where)).toList
    val (dirs, files) = pathes.partition(s=>s.isDirectory && !s.getPath.getName.toLowerCase.endsWith(".adam"))
    val dirPathes = dirs.map(d=>d.getPath.toString)
    val filePathes = files.map(d=>d.getPath.toString)
    (dirPathes, filePathes)
  }

  def openFolderRecursive(where: String): List[String] = {
    import  org.apache.hadoop.fs._
    val hadoopConfig = sparkContext.hadoopConfiguration
    val fs = FileSystem.get( hadoopConfig )
    val pathes = fs.listStatus(new Path(where)).toList
    val (dirs, files) = pathes.partition(s=>s.isDirectory && !s.getPath.getName.toLowerCase.endsWith(".adam"))
    files.map(d=>d.getPath.toString) ++
      dirs.map(d=>d.getPath.toString).flatMap(openFolderRecursive)
  }


  def loadNarrowPeaksByContig(where: String, contigName: String): List[FeatureRDD] = {
    openFolderRecursive(where)
      .map(sparkContext.loadNarrowPeak(_)
        .byContig(contigName))
  }

  def loadBedsByContig(where: String, contigName: String, innerJoin: Boolean = true): List[FeatureRDD] = {
    openFolderRecursive(where)
      .map(sparkContext.loadBed(_)
        .byContig(contigName))
  }

  import org.apache.hadoop.conf.Configuration
  import org.apache.hadoop.fs._

  def merge(srcPath: String, dstPath: String): Unit =  {
    val hadoopConfig = new Configuration()
    val hdfs = FileSystem.get(hadoopConfig)
    FileUtil.copyMerge(hdfs, new Path(srcPath), hdfs, new Path(dstPath), true, hadoopConfig, null)
    // the "true" setting deletes the source files once they are merged into the new output
  }


} 
Author: antonkulaga | Project: adam-playground | Lines: 58 | Source: HDFSFilesExtensions.scala


Example 20: ConnectionProvider

// Package declaration and imports
package com.flipkart.connekt.commons.connections

import java.util.Properties
import javax.sql.DataSource

import com.aerospike.client.Host
import com.aerospike.client.async.{AsyncClient, AsyncClientPolicy}
import com.couchbase.client.java.{Cluster, CouchbaseCluster}
import org.apache.commons.dbcp2.BasicDataSourceFactory
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory}

import scala.collection.JavaConverters._

class ConnectionProvider extends TConnectionProvider {

  override def createHbaseConnection(hConnConfig: Configuration): Connection = ConnectionFactory.createConnection(hConnConfig)

  override def createDatasourceConnection(mySQLProperties: Properties): DataSource = BasicDataSourceFactory.createDataSource(mySQLProperties)

  override def createCouchBaseConnection(nodes: List[String]): Cluster = CouchbaseCluster.create(nodes.asJava)

  override def createAeroSpikeConnection(nodes: List[String]): AsyncClient =  {
    val asyncClientPolicy = new AsyncClientPolicy()
    asyncClientPolicy.asyncMaxCommands = 500
    asyncClientPolicy.asyncSelectorThreads = 4
    new AsyncClient(asyncClientPolicy, nodes.map(new Host(_, 3000)): _ *)
  }
} 
Author: ayush03agarwal | Project: connekt | Lines: 30 | Source: ConnectionProvider.scala
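
A hedged sketch of obtaining an HBase connection through the provider (using HBaseConfiguration.create() as the configuration source is an assumption):

import org.apache.hadoop.hbase.HBaseConfiguration

val provider = new ConnectionProvider
val hbaseConnection = provider.createHbaseConnection(HBaseConfiguration.create())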



Note: the org.apache.hadoop.conf.Configuration class examples in this article are collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.

