This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.web.URLConnectionFactory. If you are wondering what URLConnectionFactory is for, or how to use it in practice, the curated examples below may help.
The URLConnectionFactory class belongs to the org.apache.hadoop.hdfs.web package. Twelve code examples of the class are shown below, ordered by popularity.
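Before the examples, a minimal usage sketch may help orient the reader: a URLConnectionFactory is built from a Hadoop Configuration (which supplies SSL and timeout settings) and then opens HTTP(S) connections on the caller's behalf. This sketch is illustrative rather than taken from any of the projects below; the URL and the SPNEGO flag value are assumptions.

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;

public class UrlConnectionFactoryDemo {
  public static void main(String[] args) throws Exception {
    // Build the default factory; SSL and timeout settings come from conf.
    Configuration conf = new Configuration();
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(conf);

    // Open a connection. The boolean enables SPNEGO authentication when true;
    // the URL below is a placeholder for a NameNode HTTP endpoint.
    HttpURLConnection conn = (HttpURLConnection) factory
        .openConnection(new URL("http://localhost:50070/jmx"), false);
    try {
      System.out.println("HTTP " + conn.getResponseCode());
    } finally {
      conn.disconnect();
    }
  }
}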
Example 1: testReadURL
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length))
      .when(conn).getHeaderField("Content-Length");
  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL>any(),
      anyBoolean());
  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));
  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
Developer: naver, Project: hadoop, Lines: 26, Source file: TestEditLogFileInputStream.java
Example 2: setUp
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  conf = new Configuration();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeHttpServer.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
  conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getClientSSLConfigFileName());
  conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getServerSSLConfigFileName());
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 17, Source file: TestNameNodeHttpServer.java
Example 3: testReadURL
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length))
      .when(conn).getHeaderField("Content-Length");
  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL>any(),
      anyBoolean());
  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));
  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source file: TestEditLogFileInputStream.java
Example 4: run
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
private static HttpURLConnection run(URLConnectionFactory factory, URL url)
    throws IOException, AuthenticationException {
  HttpURLConnection conn = null;
  try {
    conn = (HttpURLConnection) factory.openConnection(url, true);
    if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
      String msg = conn.getResponseMessage();
      throw new IOException("Error when dealing remote token: " + msg);
    }
  } catch (IOException ie) {
    LOG.info("Error when dealing remote token:", ie);
    IOException e = getExceptionFromResponse(conn);
    if (e != null) {
      LOG.info("rethrowing exception from HTTP request: "
          + e.getLocalizedMessage());
      throw e;
    }
    throw ie;
  }
  return conn;
}
Developer: yncxcw, Project: big-c, Lines: 25, Source file: DelegationTokenFetcher.java
Example 5: DFSck
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
public DFSck(Configuration conf, PrintStream out) throws IOException {
  super(conf);
  this.ugi = UserGroupInformation.getCurrentUser();
  this.out = out;
  this.connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
  this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
}
Developer: naver, Project: hadoop, Lines: 9, Source file: DFSck.java
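For context, DFSck is the class behind the hdfs fsck command and implements Hadoop's Tool interface, so it can also be driven programmatically through ToolRunner. A minimal sketch, assuming a running HDFS and using "/" as a placeholder target path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Check the whole namespace; the report is written to System.out.
    int exitCode = ToolRunner.run(new DFSck(conf, System.out),
        new String[] { "/" });
    System.exit(exitCode);
  }
}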
Example 6: cancelDelegationToken
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
/**
 * Cancel a Delegation Token.
 * @param nnAddr the NameNode's address
 * @param tok the token to cancel
 * @throws IOException
 * @throws AuthenticationException
 */
static public void cancelDelegationToken(URLConnectionFactory factory,
    URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
    AuthenticationException {
  StringBuilder buf = new StringBuilder(nnAddr.toString())
      .append(CancelDelegationTokenServlet.PATH_SPEC).append("?")
      .append(CancelDelegationTokenServlet.TOKEN).append("=")
      .append(tok.encodeToUrlString());
  HttpURLConnection conn = run(factory, new URL(buf.toString()));
  conn.disconnect();
}
Developer: naver, Project: hadoop, Lines: 18, Source file: DelegationTokenFetcher.java
Example 7: QuorumJournalManager
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
QuorumJournalManager(Configuration conf,
    URI uri, NamespaceInfo nsInfo,
    AsyncLogger.Factory loggerFactory) throws IOException {
  Preconditions.checkArgument(conf != null, "must be configured");
  this.conf = conf;
  this.uri = uri;
  this.nsInfo = nsInfo;
  this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
  this.connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
  // Configure timeouts.
  this.startSegmentTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT);
  this.prepareRecoveryTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT);
  this.acceptRecoveryTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT);
  this.finalizeSegmentTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_DEFAULT);
  this.selectInputStreamsTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_DEFAULT);
  this.getJournalStateTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT);
  this.newEpochTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT);
  this.writeTxnsTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
}
Developer: naver, Project: hadoop, Lines: 39, Source file: QuorumJournalManager.java
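Each timeout read above can be tuned through the corresponding dfs.qjournal.* configuration key. A hedged sketch of overriding one of them before constructing the manager (the 120000 ms value is purely illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class QjmTimeoutConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Raise the write-txns timeout to 120 seconds before building the QJM.
    conf.setInt(DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY, 120000);
    System.out.println(conf.getInt(
        DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
        DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT));
  }
}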
Example 8: setUp
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  conf = new Configuration();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeHttpServer.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
}
Developer: naver, Project: hadoop, Lines: 13, Source file: TestNameNodeHttpServer.java
Example 9: refresh
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
void refresh() throws IOException {
  try {
    OkHttpClient client = new OkHttpClient();
    client.setConnectTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
        TimeUnit.MILLISECONDS);
    client.setReadTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
        TimeUnit.MILLISECONDS);
    String bodyString = Utils.postBody(GRANT_TYPE, REFRESH_TOKEN,
        REFRESH_TOKEN, refreshToken,
        CLIENT_ID, clientId);
    RequestBody body = RequestBody.create(URLENCODED, bodyString);
    Request request = new Request.Builder()
        .url(refreshURL)
        .post(body)
        .build();
    Response responseBody = client.newCall(request).execute();
    if (responseBody.code() != HttpStatus.SC_OK) {
      throw new IllegalArgumentException("Received invalid http response: "
          + responseBody.code() + ", text = " + responseBody.toString());
    }
    ObjectMapper mapper = new ObjectMapper();
    Map<?, ?> response = mapper.reader(Map.class)
        .readValue(responseBody.body().string());
    String newExpiresIn = response.get(EXPIRES_IN).toString();
    accessTokenTimer.setExpiresIn(newExpiresIn);
    accessToken = response.get(ACCESS_TOKEN).toString();
  } catch (Exception e) {
    throw new IOException("Exception while refreshing access token", e);
  }
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source file: ConfRefreshTokenBasedAccessTokenProvider.java
Example 10: refresh
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
void refresh() throws IOException {
  try {
    OkHttpClient client = new OkHttpClient();
    client.setConnectTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
        TimeUnit.MILLISECONDS);
    client.setReadTimeout(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
        TimeUnit.MILLISECONDS);
    String bodyString = Utils.postBody(CLIENT_SECRET, getCredential(),
        GRANT_TYPE, CLIENT_CREDENTIALS,
        CLIENT_ID, clientId);
    RequestBody body = RequestBody.create(URLENCODED, bodyString);
    Request request = new Request.Builder()
        .url(refreshURL)
        .post(body)
        .build();
    Response responseBody = client.newCall(request).execute();
    if (responseBody.code() != HttpStatus.SC_OK) {
      throw new IllegalArgumentException("Received invalid http response: "
          + responseBody.code() + ", text = " + responseBody.toString());
    }
    ObjectMapper mapper = new ObjectMapper();
    Map<?, ?> response = mapper.reader(Map.class)
        .readValue(responseBody.body().string());
    String newExpiresIn = response.get(EXPIRES_IN).toString();
    timer.setExpiresIn(newExpiresIn);
    accessToken = response.get(ACCESS_TOKEN).toString();
  } catch (Exception e) {
    throw new IOException("Unable to obtain access token from credential", e);
  }
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source file: CredentialBasedAccessTokenProvider.java
Example 11: renewDelegationToken
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
/**
 * Renew a Delegation Token.
 * @param nnAddr the NameNode's address
 * @param tok the token to renew
 * @return the Date that the token will expire next.
 * @throws IOException
 * @throws AuthenticationException
 */
static public long renewDelegationToken(URLConnectionFactory factory,
    URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
    AuthenticationException {
  StringBuilder buf = new StringBuilder(nnAddr.toString())
      .append(RenewDelegationTokenServlet.PATH_SPEC).append("?")
      .append(RenewDelegationTokenServlet.TOKEN).append("=")
      .append(tok.encodeToUrlString());
  HttpURLConnection connection = null;
  BufferedReader in = null;
  try {
    connection = run(factory, new URL(buf.toString()));
    in = new BufferedReader(new InputStreamReader(
        connection.getInputStream(), Charsets.UTF_8));
    long result = Long.parseLong(in.readLine());
    return result;
  } catch (IOException ie) {
    LOG.info("error in renew over HTTP", ie);
    IOException e = getExceptionFromResponse(connection);
    if (e != null) {
      LOG.info("rethrowing exception from HTTP request: "
          + e.getLocalizedMessage());
      throw e;
    }
    throw ie;
  } finally {
    IOUtils.cleanup(LOG, in);
    if (connection != null) {
      connection.disconnect();
    }
  }
}
Developer: yncxcw, Project: big-c, Lines: 42, Source file: DelegationTokenFetcher.java
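A short sketch of how a caller might combine the default factory with this helper; the wrapper method below is illustrative, and it assumes DelegationTokenFetcher lives in org.apache.hadoop.hdfs.tools as in the Hadoop versions these examples come from. The NameNode URI and token are supplied by the caller:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.token.Token;

public class RenewDemo {
  // Renew the given HDFS delegation token; returns the next expiry time.
  static long renew(URI nnAddr, Token<DelegationTokenIdentifier> token)
      throws Exception {
    Configuration conf = new Configuration();
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
    return DelegationTokenFetcher.renewDelegationToken(factory, nnAddr, token);
  }
}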
Example 12: URLLog
import org.apache.hadoop.hdfs.web.URLConnectionFactory; // import the required package/class
public URLLog(URLConnectionFactory connectionFactory, URL url) {
  this.connectionFactory = connectionFactory;
  this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
  this.url = url;
}
Developer: naver, Project: hadoop, Lines: 6, Source file: EditLogFileInputStream.java
Note: the org.apache.hadoop.hdfs.web.URLConnectionFactory examples in this article were collected from open-source projects hosted on GitHub and similar source-code and documentation platforms. Copyright of each snippet remains with its original authors; before reusing or redistributing the code, consult the license of the corresponding project. Do not republish without permission.