1 | /******************************************************************** |
2 | * Copyright (c) 2013 - 2014, Pivotal Inc. |
3 | * All rights reserved. |
4 | * |
5 | * Author: Zhanwei Wang |
6 | ********************************************************************/ |
7 | /******************************************************************** |
8 | * 2014 - |
9 | * open source under Apache License Version 2.0 |
10 | ********************************************************************/ |
11 | /** |
12 | * Licensed to the Apache Software Foundation (ASF) under one |
13 | * or more contributor license agreements. See the NOTICE file |
14 | * distributed with this work for additional information |
15 | * regarding copyright ownership. The ASF licenses this file |
16 | * to you under the Apache License, Version 2.0 (the |
17 | * "License"); you may not use this file except in compliance |
18 | * with the License. You may obtain a copy of the License at |
19 | * |
20 | * http://www.apache.org/licenses/LICENSE-2.0 |
21 | * |
22 | * Unless required by applicable law or agreed to in writing, software |
23 | * distributed under the License is distributed on an "AS IS" BASIS, |
24 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
25 | * See the License for the specific language governing permissions and |
26 | * limitations under the License. |
27 | */ |
28 | #include "platform.h" |
29 | |
30 | #include "Exception.h" |
31 | #include "ExceptionInternal.h" |
32 | #include "FileSystem.h" |
33 | #include "hdfs.h" |
34 | #include "InputStream.h" |
35 | #include "Logger.h" |
36 | #include "Logger.h" |
37 | #include "Memory.h" |
38 | #include "OutputStream.h" |
39 | #include "server/NamenodeInfo.h" |
40 | #include "SessionConfig.h" |
41 | #include "Thread.h" |
42 | #include "XmlConfig.h" |
43 | |
44 | #include <vector> |
45 | #include <string> |
46 | #include <libxml/uri.h> |
47 | |
48 | #ifdef __cplusplus |
49 | extern "C" { |
50 | #endif |
51 | |
// Configuration key that points Kerberos authentication at a ticket cache file.
#define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"

// Capacity of the per-thread buffer backing hdfsGetLastError().
#ifndef ERROR_MESSAGE_BUFFER_SIZE
#define ERROR_MESSAGE_BUFFER_SIZE 4096
#endif

// Thread-local storage for the most recent error text returned by
// hdfsGetLastError(); initialized to "Success" so a fresh thread reads sanely.
static THREAD_LOCAL char ErrorMessage[ERROR_MESSAGE_BUFFER_SIZE] = "Success" ;
59 | |
60 | static void SetLastException(Hdfs::exception_ptr e) { |
61 | std::string buffer; |
62 | const char *p; |
63 | p = Hdfs::Internal::GetExceptionMessage(e, buffer); |
64 | strncpy(ErrorMessage, p, sizeof(ErrorMessage) - 1); |
65 | ErrorMessage[sizeof(ErrorMessage) - 1] = 0; |
66 | } |
67 | |
68 | static void SetErrorMessage(const char *msg) { |
69 | assert(NULL != msg); |
70 | strncpy(ErrorMessage, msg, sizeof(ErrorMessage) - 1); |
71 | ErrorMessage[sizeof(ErrorMessage) - 1] = 0; |
72 | } |
73 | |
/*
 * Validate an API precondition: when the predicate fails, record a
 * human-readable message for hdfsGetLastError(), set errno to eno, and
 * return retval from the *enclosing* function.  Deliberately a bare if
 * (no do/while wrapper) so the early return affects the caller.
 */
#define PARAMETER_ASSERT(para, retval, eno) \
    if (!(para)) { \
        SetErrorMessage(Hdfs::Internal::GetSystemErrorInfo(eno)); \
        errno = eno; \
        return retval; \
    }
80 | |
/**
 * Duplicate a C string into a buffer allocated with new[].
 * The caller owns the result and must release it with delete[]
 * (see hdfsConfStrFree / hdfsFreeFileInfo).
 *
 * @param str source string; may be NULL.
 * @return heap copy of str, or NULL when str is NULL.
 */
static inline char * Strdup(const char * str) {
    if (str == NULL) {
        return NULL;
    }

    // strlen() returns size_t; storing it in an int would narrow the value
    // (implementation-defined) for strings longer than INT_MAX.
    size_t len = strlen(str);
    char * retval = new char[len + 1];
    memcpy(retval, str, len + 1);
    return retval;
}
91 | |
// Pull the Hdfs types used throughout this C shim into the global namespace.
using Hdfs::InputStream;
using Hdfs::OutputStream;
using Hdfs::FileSystem;
using Hdfs::exception_ptr;
using Hdfs::Config;
using Hdfs::Internal::shared_ptr;
using Hdfs::NamenodeInfo;
using Hdfs::FileNotFoundException;
100 | |
/**
 * C-side handle behind hdfsFile.  Wraps either an InputStream (read mode)
 * or an OutputStream (write mode); `input` records which of the two the
 * type-erased `stream` pointer refers to.
 */
struct HdfsFileInternalWrapper {
public:
    HdfsFileInternalWrapper() :
        input(true), stream(NULL) {
    }

    ~HdfsFileInternalWrapper() {
        // stream is stored as void *, so it must be cast back to its real
        // static type before deletion for the correct destructor to run.
        if (input) {
            delete static_cast<InputStream *>(stream);
        } else {
            delete static_cast<OutputStream *>(stream);
        }
    }

    // Returns the wrapped InputStream; throws when the file was opened for
    // write, or was never opened at all.
    InputStream & getInputStream() {
        if (!input) {
            THROW(Hdfs::HdfsException,
                  "Internal error: file was not opened for read." );
        }

        if (!stream) {
            THROW(Hdfs::HdfsIOException, "File is not opened." );
        }

        return *static_cast<InputStream *>(stream);
    }
    // Returns the wrapped OutputStream; throws when the file was opened for
    // read, or was never opened at all.
    OutputStream & getOutputStream() {
        if (input) {
            THROW(Hdfs::HdfsException,
                  "Internal error: file was not opened for write." );
        }

        if (!stream) {
            THROW(Hdfs::HdfsIOException, "File is not opened." );
        }

        return *static_cast<OutputStream *>(stream);
    }

    // True when this handle wraps an InputStream.
    bool isInput() const {
        return input;
    }

    // Select read (true) or write (false) mode; set before setStream().
    void setInput(bool input) {
        this->input = input;
    }

    // Take ownership of the stream object; it is deleted by the destructor.
    void setStream(void * stream) {
        this->stream = stream;
    }

private:
    bool input;   // true = stream is an InputStream, false = OutputStream
    void * stream; // owned; type determined by `input`
};
156 | |
/**
 * C-side handle behind hdfsFS.  Owns the FileSystem object created by
 * hdfsBuilderConnect() and deletes it when the handle is destroyed
 * (see hdfsDisconnect).
 */
struct HdfsFileSystemInternalWrapper {
public:
    HdfsFileSystemInternalWrapper(FileSystem * fs) :
        filesystem(fs) {
    }

    ~HdfsFileSystemInternalWrapper() {
        delete filesystem;
    }

    FileSystem & getFilesystem() {
        return *filesystem;
    }

private:
    FileSystem * filesystem; // owned
};
174 | |
/**
 * Loads the client configuration file.  The path comes from the
 * LIBHDFS3_CONF environment variable when set; otherwise the default
 * "hdfs-client.xml" is tried relative to the working directory.
 */
class DefaultConfig {
public:
    DefaultConfig() : conf(new Hdfs::Config) {
        bool reportError = false;
        const char * env = getenv("LIBHDFS3_CONF" );
        std::string confPath = env ? env : "" ;

        if (!confPath.empty()) {
            // The variable may contain a full "NAME=/path" assignment
            // (e.g. copied verbatim from a shell line); keep only the part
            // after the first '='.
            size_t pos = confPath.find_first_of('=');

            if (pos != confPath.npos) {
                confPath = confPath.c_str() + pos + 1;
            }

            // A path the user asked for explicitly deserves an error log if
            // unreadable; the implicit default is allowed to be absent.
            reportError = true;
        } else {
            confPath = "hdfs-client.xml" ;
        }

        init(confPath, reportError);
    }

    // Load an explicit configuration file; path must be non-empty.
    DefaultConfig(const char * path) : conf(new Hdfs::Config) {
        assert(path != NULL && strlen(path) > 0);
        init(path, true);
    }

    shared_ptr<Config> getConfig() {
        return conf;
    }

private:
    // Parse confPath into `conf`.  When the file is unreadable: silently
    // skipped for the default location, logged when user-supplied (but the
    // update is still attempted so Config reports the precise failure).
    void init(const std::string & confPath, bool reportError) {
        if (access(confPath.c_str(), R_OK)) {
            if (reportError) {
                LOG(Hdfs::Internal::LOG_ERROR,
                    "Environment variable LIBHDFS3_CONF is set but %s cannot be read" ,
                    confPath.c_str());
            } else {
                return;
            }
        }

        conf->update(confPath.c_str());
    }
private:
    shared_ptr<Config> conf;
};
223 | |
/**
 * Connection parameters accumulated by the hdfsBuilder* setter functions
 * and consumed by hdfsBuilderConnect().  Starts with the default client
 * configuration and port 0 (meaning "take the port from the URI").
 */
struct hdfsBuilder {
public:
    hdfsBuilder() :
        conf(DefaultConfig().getConfig()), port(0) {
    }

    ~hdfsBuilder() {
    }

public:
    std::string token;        // delegation token; mutually exclusive with userName
    shared_ptr<Config> conf;  // per-builder configuration (hdfsBuilderConfSetStr)
    std::string nn;           // namenode host, host:port, URI, or "default"
    std::string userName;     // effective user; empty = use URI/default user
    tPort port;               // namenode port; 0 = unspecified
};
240 | |
241 | static void handleException(Hdfs::exception_ptr error) { |
242 | try { |
243 | Hdfs::rethrow_exception(error); |
244 | |
245 | #ifndef NDEBUG |
246 | std::string buffer; |
247 | LOG(Hdfs::Internal::LOG_ERROR, "Handle Exception: %s" , |
248 | Hdfs::Internal::GetExceptionDetail(error, buffer)); |
249 | #endif |
250 | } catch (Hdfs::AccessControlException &) { |
251 | errno = EACCES; |
252 | } catch (Hdfs::AlreadyBeingCreatedException &) { |
253 | errno = EBUSY; |
254 | } catch (Hdfs::ChecksumException &) { |
255 | errno = EIO; |
256 | } catch (Hdfs::DSQuotaExceededException &) { |
257 | errno = ENOSPC; |
258 | } catch (Hdfs::FileAlreadyExistsException &) { |
259 | errno = EEXIST; |
260 | } catch (Hdfs::FileNotFoundException &) { |
261 | errno = ENOENT; |
262 | } catch (const Hdfs::HdfsBadBoolFoumat &) { |
263 | errno = EINVAL; |
264 | } catch (const Hdfs::HdfsBadConfigFoumat &) { |
265 | errno = EINVAL; |
266 | } catch (const Hdfs::HdfsBadNumFoumat &) { |
267 | errno = EINVAL; |
268 | } catch (const Hdfs::HdfsCanceled &) { |
269 | errno = EIO; |
270 | } catch (const Hdfs::HdfsConfigInvalid &) { |
271 | errno = EINVAL; |
272 | } catch (const Hdfs::HdfsConfigNotFound &) { |
273 | errno = EINVAL; |
274 | } catch (const Hdfs::HdfsEndOfStream &) { |
275 | errno = EOVERFLOW; |
276 | } catch (const Hdfs::HdfsInvalidBlockToken &) { |
277 | errno = EPERM; |
278 | } catch (const Hdfs::HdfsTimeoutException &) { |
279 | errno = EIO; |
280 | } catch (Hdfs::HadoopIllegalArgumentException &) { |
281 | errno = EINVAL; |
282 | } catch (Hdfs::InvalidParameter &) { |
283 | errno = EINVAL; |
284 | } catch (Hdfs::InvalidPath &) { |
285 | errno = EINVAL; |
286 | } catch (Hdfs::NotReplicatedYetException &) { |
287 | errno = EINVAL; |
288 | } catch (Hdfs::NSQuotaExceededException &) { |
289 | errno = EINVAL; |
290 | } catch (Hdfs::ParentNotDirectoryException &) { |
291 | errno = EACCES; |
292 | } catch (Hdfs::ReplicaNotFoundException &) { |
293 | errno = EACCES; |
294 | } catch (Hdfs::SafeModeException &) { |
295 | errno = EIO; |
296 | } catch (Hdfs::UnresolvedLinkException &) { |
297 | errno = EACCES; |
298 | } catch (Hdfs::HdfsRpcException &) { |
299 | errno = EIO; |
300 | } catch (Hdfs::HdfsNetworkException &) { |
301 | errno = EIO; |
302 | } catch (Hdfs::RpcNoSuchMethodException &) { |
303 | errno = ENOTSUP; |
304 | } catch (Hdfs::UnsupportedOperationException &) { |
305 | errno = ENOTSUP; |
306 | } catch (Hdfs::SaslException &) { |
307 | errno = EACCES; |
308 | } catch (Hdfs::NameNodeStandbyException &) { |
309 | errno = EIO; |
310 | } catch (Hdfs::RecoveryInProgressException &){ |
311 | errno = EBUSY; |
312 | } catch (Hdfs::HdfsIOException &) { |
313 | std::string buffer; |
314 | LOG(Hdfs::Internal::LOG_ERROR, "Handle Exception: %s" , Hdfs::Internal::GetExceptionDetail(error, buffer)); |
315 | errno = EIO; |
316 | } catch (Hdfs::HdfsException & e) { |
317 | std::string buffer; |
318 | LOG(Hdfs::Internal::LOG_ERROR, "Unexpected exception %s: %s" , typeid(e).name(), |
319 | Hdfs::Internal::GetExceptionDetail(e, buffer)); |
320 | errno = EINTERNAL; |
321 | } catch (std::exception & e) { |
322 | LOG(Hdfs::Internal::LOG_ERROR, "Unexpected exception %s: %s" , typeid(e).name(), e.what()); |
323 | errno = EINTERNAL; |
324 | } |
325 | } |
326 | |
327 | const char * hdfsGetLastError() { |
328 | return ErrorMessage; |
329 | } |
330 | |
331 | int hdfsFileIsOpenForRead(hdfsFile file) { |
332 | PARAMETER_ASSERT(file, 0, EINVAL); |
333 | return file->isInput() ? 1 : 0; |
334 | } |
335 | |
336 | int hdfsFileIsOpenForWrite(hdfsFile file) { |
337 | PARAMETER_ASSERT(file, 0, EINVAL); |
338 | return !file->isInput() ? 1 : 0; |
339 | } |
340 | |
341 | hdfsFS hdfsConnectAsUser(const char * host, tPort port, const char * user) { |
342 | hdfsFS retVal = NULL; |
343 | PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL); |
344 | PARAMETER_ASSERT(port > 0, NULL, EINVAL); |
345 | PARAMETER_ASSERT(user != NULL && strlen(user) > 0, NULL, EINVAL); |
346 | struct hdfsBuilder * bld = hdfsNewBuilder(); |
347 | |
348 | if (!bld) |
349 | return NULL; |
350 | |
351 | hdfsBuilderSetNameNode(bld, host); |
352 | hdfsBuilderSetNameNodePort(bld, port); |
353 | hdfsBuilderSetUserName(bld, user); |
354 | retVal = hdfsBuilderConnect(bld); |
355 | hdfsFreeBuilder(bld); |
356 | return retVal; |
357 | } |
358 | |
359 | hdfsFS hdfsConnect(const char * host, tPort port) { |
360 | hdfsFS retVal = NULL; |
361 | PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL); |
362 | PARAMETER_ASSERT(port > 0, NULL, EINVAL); |
363 | struct hdfsBuilder * bld = hdfsNewBuilder(); |
364 | |
365 | if (!bld) |
366 | return NULL; |
367 | |
368 | hdfsBuilderSetNameNode(bld, host); |
369 | hdfsBuilderSetNameNodePort(bld, port); |
370 | retVal = hdfsBuilderConnect(bld); |
371 | hdfsFreeBuilder(bld); |
372 | return retVal; |
373 | } |
374 | |
375 | hdfsFS hdfsConnectAsUserNewInstance(const char * host, tPort port, |
376 | const char * user) { |
377 | hdfsFS retVal = NULL; |
378 | PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL); |
379 | PARAMETER_ASSERT(port > 0, NULL, EINVAL); |
380 | PARAMETER_ASSERT(user != NULL && strlen(user) > 0, NULL, EINVAL); |
381 | struct hdfsBuilder * bld = hdfsNewBuilder(); |
382 | |
383 | if (!bld) |
384 | return NULL; |
385 | |
386 | hdfsBuilderSetNameNode(bld, host); |
387 | hdfsBuilderSetNameNodePort(bld, port); |
388 | hdfsBuilderSetForceNewInstance(bld); |
389 | hdfsBuilderSetUserName(bld, user); |
390 | retVal = hdfsBuilderConnect(bld); |
391 | hdfsFreeBuilder(bld); |
392 | return retVal; |
393 | } |
394 | |
395 | hdfsFS hdfsConnectNewInstance(const char * host, tPort port) { |
396 | hdfsFS retVal = NULL; |
397 | PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL); |
398 | PARAMETER_ASSERT(port > 0, NULL, EINVAL); |
399 | struct hdfsBuilder * bld = hdfsNewBuilder(); |
400 | |
401 | if (!bld) |
402 | return NULL; |
403 | |
404 | hdfsBuilderSetNameNode(bld, host); |
405 | hdfsBuilderSetNameNodePort(bld, port); |
406 | hdfsBuilderSetForceNewInstance(bld); |
407 | retVal = hdfsBuilderConnect(bld); |
408 | hdfsFreeBuilder(bld); |
409 | return retVal; |
410 | } |
411 | |
/**
 * Build the connection URI from the accumulated builder state and open a
 * FileSystem.  "default" as the namenode means: take the URI from the
 * configuration (fs.defaultFS).  A bare host (no "://") gets an implicit
 * "hdfs://" scheme.  Port and user may come from either the URI or the
 * builder, but not both.
 *
 * @return new HdfsFileSystemInternalWrapper (caller frees via
 *         hdfsDisconnect), or NULL on failure with errno set.
 */
hdfsFS hdfsBuilderConnect(struct hdfsBuilder * bld) {
    PARAMETER_ASSERT(bld && !bld->nn.empty(), NULL, EINVAL);
    Hdfs::Internal::SessionConfig conf(*bld->conf);
    std::string uri;
    std::stringstream ss;
    // Classic locale: keep the port number free of locale digit grouping.
    ss.imbue(std::locale::classic());
    xmlURIPtr uriobj;
    FileSystem * fs = NULL;

    if (0 == strcasecmp(bld->nn.c_str(), "default" )) {
        uri = conf.getDefaultUri();
    } else {
        /*
         * handle scheme
         */
        if (bld->nn.find("://" ) == bld->nn.npos) {
            uri = "hdfs://" ;
        }

        uri += bld->nn;
    }

    uriobj = xmlParseURI(uri.c_str());

    try {
        if (!uriobj) {
            THROW(Hdfs::InvalidParameter, "Cannot parse connection URI" );
        }

        // Reject ambiguous input: a port (or user) given both in the URI
        // and through the builder setters.
        if (uriobj->port != 0 && bld->port != 0) {
            THROW(Hdfs::InvalidParameter, "Cannot determinate port" );
        }

        if (uriobj->user && !bld->userName.empty()) {
            THROW(Hdfs::InvalidParameter, "Cannot determinate user name" );
        }

        // Rebuild a normalized URI: scheme://[user@]host[:port]
        ss << uriobj->scheme << "://" ;

        if (uriobj->user || !bld->userName.empty()) {
            ss << (uriobj->user ? uriobj->user : bld->userName.c_str())
               << '@';
        }

        if (bld->port == 0 && uriobj->port == 0) {
            ss << uriobj->server;
        } else {
            ss << uriobj->server << ":" << (uriobj->port ? uriobj->port : bld->port);
        }

        uri = ss.str();
    } catch (const std::bad_alloc & e) {
        // libxml objects are raw C resources; release them on every path.
        if (uriobj) {
            xmlFreeURI(uriobj);
        }

        SetErrorMessage("Out of memory" );
        errno = ENOMEM;
        return NULL;
    } catch (...) {
        if (uriobj) {
            xmlFreeURI(uriobj);
        }

        SetLastException(Hdfs::current_exception());
        handleException(Hdfs::current_exception());
        return NULL;
    }

    xmlFreeURI(uriobj);

    try {
        fs = new FileSystem(*bld->conf);

        // A non-empty token selects delegation-token authentication.
        if (!bld->token.empty()) {
            fs->connect(uri.c_str(), NULL, bld->token.c_str());
        } else {
            fs->connect(uri.c_str());
        }

        return new HdfsFileSystemInternalWrapper(fs);
    } catch (const std::bad_alloc & e) {
        SetErrorMessage("Out of memory" );
        delete fs;
        errno = ENOMEM;
    } catch (...) {
        delete fs;
        SetLastException(Hdfs::current_exception());
        handleException(Hdfs::current_exception());
    }

    return NULL;
}
505 | |
506 | struct hdfsBuilder * hdfsNewBuilder(void) { |
507 | try { |
508 | return new struct hdfsBuilder; |
509 | } catch (const std::bad_alloc & e) { |
510 | SetErrorMessage("Out of memory" ); |
511 | errno = ENOMEM; |
512 | } catch (...) { |
513 | SetLastException(Hdfs::current_exception()); |
514 | handleException(Hdfs::current_exception()); |
515 | } |
516 | |
517 | return NULL; |
518 | } |
519 | |
520 | void hdfsFreeBuilder(struct hdfsBuilder * bld) { |
521 | delete bld; |
522 | } |
523 | |
/**
 * Intentionally a no-op: libhdfs3 never caches or shares FileSystem
 * instances, so every connect already yields a new one.  Kept so code
 * written against libhdfs links and runs unchanged.
 */
void hdfsBuilderSetForceNewInstance(struct hdfsBuilder * bld) {
    assert(bld);
}
527 | |
528 | void hdfsBuilderSetNameNode(struct hdfsBuilder * bld, const char * nn) { |
529 | assert(bld != NULL && nn != NULL); |
530 | bld->nn = nn; |
531 | } |
532 | |
533 | void hdfsBuilderSetNameNodePort(struct hdfsBuilder * bld, tPort port) { |
534 | assert(bld != NULL && port > 0); |
535 | bld->port = port; |
536 | } |
537 | |
538 | void hdfsBuilderSetUserName(struct hdfsBuilder * bld, const char * userName) { |
539 | assert(bld && userName && strlen(userName) > 0); |
540 | bld->userName = userName; |
541 | } |
542 | |
543 | void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder * bld, |
544 | const char * kerbTicketCachePath) { |
545 | assert(bld && kerbTicketCachePath && strlen(kerbTicketCachePath) > 0); |
546 | hdfsBuilderConfSetStr(bld, KERBEROS_TICKET_CACHE_PATH, kerbTicketCachePath); |
547 | } |
548 | |
549 | void hdfsBuilderSetToken(struct hdfsBuilder * bld, const char * token) { |
550 | assert(bld && token && strlen(token) > 0 && bld->userName.empty()); |
551 | |
552 | try { |
553 | bld->token = token; |
554 | } catch (const std::bad_alloc & e) { |
555 | errno = ENOMEM; |
556 | } catch (...) { |
557 | handleException(Hdfs::current_exception()); |
558 | } |
559 | } |
560 | |
561 | int hdfsBuilderConfSetStr(struct hdfsBuilder * bld, const char * key, |
562 | const char * val) { |
563 | PARAMETER_ASSERT(bld && key && strlen(key) > 0, -1, EINVAL); |
564 | PARAMETER_ASSERT(val && strlen(val) > 0, -1, EINVAL); |
565 | |
566 | try { |
567 | bld->conf->set(key, val); |
568 | return 0; |
569 | } catch (const std::bad_alloc & e) { |
570 | SetErrorMessage("Out of memory" ); |
571 | errno = ENOMEM; |
572 | } catch (...) { |
573 | SetLastException(Hdfs::current_exception()); |
574 | handleException(Hdfs::current_exception()); |
575 | } |
576 | |
577 | return -1; |
578 | } |
579 | |
580 | int hdfsConfGetStr(const char * key, char ** val) { |
581 | PARAMETER_ASSERT(key && strlen(key) > 0 && val, -1, EINVAL); |
582 | |
583 | try { |
584 | std::string retval = DefaultConfig().getConfig()->getString(key); |
585 | *val = Strdup(retval.c_str()); |
586 | return 0; |
587 | } catch (const std::bad_alloc & e) { |
588 | SetErrorMessage("Out of memory" ); |
589 | errno = ENOMEM; |
590 | } catch (...) { |
591 | SetLastException(Hdfs::current_exception()); |
592 | handleException(Hdfs::current_exception()); |
593 | } |
594 | |
595 | return -1; |
596 | } |
597 | |
598 | void hdfsConfStrFree(char * val) { |
599 | delete[] val; |
600 | } |
601 | |
602 | int hdfsConfGetInt(const char * key, int32_t * val) { |
603 | PARAMETER_ASSERT(key && strlen(key) > 0 && val, -1, EINVAL); |
604 | |
605 | try { |
606 | *val = DefaultConfig().getConfig()->getInt32(key); |
607 | return 0; |
608 | } catch (const std::bad_alloc & e) { |
609 | SetErrorMessage("Out of memory" ); |
610 | errno = ENOMEM; |
611 | } catch (...) { |
612 | SetLastException(Hdfs::current_exception()); |
613 | handleException(Hdfs::current_exception()); |
614 | } |
615 | |
616 | return -1; |
617 | } |
618 | |
/**
 * Disconnect and destroy a filesystem handle.  NULL is accepted (returns 0).
 * The handle is freed even when disconnect() throws — callers must not
 * reuse fs after this call regardless of the return value.
 *
 * @return 0 on success, -1 on failure (errno set).
 */
int hdfsDisconnect(hdfsFS fs) {
    try {
        if (fs) {
            fs->getFilesystem().disconnect();
            delete fs;
        }

        return 0;
    } catch (const std::bad_alloc & e) {
        delete fs;
        SetErrorMessage("Out of memory" );
        errno = ENOMEM;
    } catch (...) {
        delete fs;
        SetLastException(Hdfs::current_exception());
        handleException(Hdfs::current_exception());
    }

    return -1;
}
639 | |
/**
 * Open a file for reading or writing.  Supported flag combinations:
 * O_RDONLY (default), O_WRONLY (create/overwrite), O_WRONLY|O_APPEND,
 * O_CREAT, optionally with O_SYNC.  O_RDWR and O_CREAT|O_EXCL are
 * rejected with ENOTSUP.
 *
 * @param bufferSize  ignored must-be->=0 hint (libhdfs compatibility).
 * @param replication replication factor; 0 = use the configured default.
 * @param blocksize   block size in bytes; 0 = use the configured default.
 * @return file handle (free with hdfsCloseFile), or NULL on failure.
 */
hdfsFile hdfsOpenFile(hdfsFS fs, const char * path, int flags, int bufferSize,
                      short replication, tOffset blocksize) {
    PARAMETER_ASSERT(fs && path && strlen(path) > 0, NULL, EINVAL);
    PARAMETER_ASSERT(bufferSize >= 0 && replication >= 0 && blocksize >= 0, NULL, EINVAL);
    PARAMETER_ASSERT(!(flags & O_RDWR) && !((flags & O_EXCL) && (flags & O_CREAT)), NULL, ENOTSUP);
    HdfsFileInternalWrapper * file = NULL;
    OutputStream * os = NULL;
    InputStream * is = NULL;

    try {
        file = new HdfsFileInternalWrapper();

        // Any write-ish flag selects the output path; otherwise read.
        if ((flags & O_CREAT) || (flags & O_APPEND) || (flags & O_WRONLY)) {
            int internalFlags = 0;

            if (flags & O_CREAT) {
                internalFlags |= Hdfs::Create;
            } else if ((flags & O_APPEND) && (flags & O_WRONLY)) {
                // Append, creating the file first if it does not exist.
                internalFlags |= Hdfs::Create;
                internalFlags |= Hdfs::Append;
            } else if (flags & O_WRONLY) {
                // Plain write: create or truncate.
                internalFlags |= Hdfs::Create;
                internalFlags |= Hdfs::Overwrite;
            }

            if (flags & O_SYNC) {
                internalFlags |= Hdfs::SyncBlock;
            }

            file->setInput(false);
            os = new OutputStream;
            // 0777 is the pre-umask permission used by the classic libhdfs API.
            os->open(fs->getFilesystem(), path, internalFlags, 0777, false, replication,
                     blocksize);
            // Ownership of the stream passes to the wrapper only after a
            // successful open; on throw the catch blocks free os directly.
            file->setStream(os);
        } else {
            file->setInput(true);
            is = new InputStream;
            is->open(fs->getFilesystem(), path, true);
            file->setStream(is);
        }

        return file;
    } catch (const std::bad_alloc & e) {
        delete file;
        delete os;
        delete is;
        SetErrorMessage("Out of memory" );
        errno = ENOMEM;
    } catch (...) {
        delete file;
        delete os;
        delete is;
        SetLastException(Hdfs::current_exception());
        handleException(Hdfs::current_exception());
    }

    return NULL;
}
698 | |
/**
 * Close and destroy a file handle.  NULL file is accepted (returns 0).
 * The handle is freed even when close() throws — callers must not reuse
 * file after this call regardless of the return value.
 *
 * @return 0 on success, -1 on failure (errno set).
 */
int hdfsCloseFile(hdfsFS fs, hdfsFile file) {
    PARAMETER_ASSERT(fs, -1, EINVAL);

    try {
        if (file) {
            if (file->isInput()) {
                file->getInputStream().close();
            } else {
                // For output, close() flushes buffered data; failures here
                // mean data may not have reached the cluster.
                file->getOutputStream().close();
            }

            delete file;
        }

        return 0;
    } catch (const std::bad_alloc & e) {
        delete file;
        SetErrorMessage("Out of memory" );
        errno = ENOMEM;
    } catch (...) {
        delete file;
        SetLastException(Hdfs::current_exception());
        handleException(Hdfs::current_exception());
    }

    return -1;
}
726 | |
727 | int hdfsExists(hdfsFS fs, const char * path) { |
728 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL); |
729 | |
730 | try { |
731 | return fs->getFilesystem().exist(path) ? 0 : -1; |
732 | } catch (const std::bad_alloc & e) { |
733 | SetErrorMessage("Out of memory" ); |
734 | errno = ENOMEM; |
735 | } catch (...) { |
736 | SetLastException(Hdfs::current_exception()); |
737 | handleException(Hdfs::current_exception()); |
738 | } |
739 | |
740 | return -1; |
741 | } |
742 | |
743 | int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) { |
744 | PARAMETER_ASSERT(fs && file && desiredPos >= 0, -1, EINVAL); |
745 | PARAMETER_ASSERT(file->isInput(), -1, EINVAL); |
746 | |
747 | try { |
748 | file->getInputStream().seek(desiredPos); |
749 | return 0; |
750 | } catch (const std::bad_alloc & e) { |
751 | SetErrorMessage("Out of memory" ); |
752 | errno = ENOMEM; |
753 | } catch (...) { |
754 | SetLastException(Hdfs::current_exception()); |
755 | handleException(Hdfs::current_exception()); |
756 | } |
757 | |
758 | return -1; |
759 | } |
760 | |
761 | tOffset hdfsTell(hdfsFS fs, hdfsFile file) { |
762 | PARAMETER_ASSERT(fs && file, -1, EINVAL); |
763 | |
764 | try { |
765 | if (file->isInput()) { |
766 | return file->getInputStream().tell(); |
767 | } else { |
768 | return file->getOutputStream().tell(); |
769 | } |
770 | } catch (const std::bad_alloc & e) { |
771 | SetErrorMessage("Out of memory" ); |
772 | errno = ENOMEM; |
773 | } catch (...) { |
774 | SetLastException(Hdfs::current_exception()); |
775 | handleException(Hdfs::current_exception()); |
776 | } |
777 | |
778 | return -1; |
779 | } |
780 | |
781 | tSize hdfsRead(hdfsFS fs, hdfsFile file, void * buffer, tSize length) { |
782 | PARAMETER_ASSERT(fs && file && buffer && length > 0, -1, EINVAL); |
783 | PARAMETER_ASSERT(file->isInput(), -1, EINVAL); |
784 | |
785 | try { |
786 | return file->getInputStream().read(static_cast<char *>(buffer), length); |
787 | } catch (const Hdfs::HdfsEndOfStream & e) { |
788 | return 0; |
789 | } catch (const std::bad_alloc & e) { |
790 | SetErrorMessage("Out of memory" ); |
791 | errno = ENOMEM; |
792 | } catch (...) { |
793 | SetLastException(Hdfs::current_exception()); |
794 | handleException(Hdfs::current_exception()); |
795 | } |
796 | |
797 | return -1; |
798 | } |
799 | |
800 | tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void * buffer, tSize length) { |
801 | PARAMETER_ASSERT(fs && file && buffer && length > 0, -1, EINVAL); |
802 | PARAMETER_ASSERT(!file->isInput(), -1, EINVAL); |
803 | |
804 | try { |
805 | file->getOutputStream().append(static_cast<const char *>(buffer), |
806 | length); |
807 | return length; |
808 | } catch (const std::bad_alloc & e) { |
809 | SetErrorMessage("Out of memory" ); |
810 | errno = ENOMEM; |
811 | } catch (...) { |
812 | SetLastException(Hdfs::current_exception()); |
813 | handleException(Hdfs::current_exception()); |
814 | } |
815 | |
816 | return -1; |
817 | } |
818 | |
819 | int hdfsFlush(hdfsFS fs, hdfsFile file) { |
820 | PARAMETER_ASSERT(fs && file && file, -1, EINVAL); |
821 | return hdfsHFlush(fs, file); |
822 | } |
823 | |
824 | int hdfsHFlush(hdfsFS fs, hdfsFile file) { |
825 | PARAMETER_ASSERT(fs && file && file, -1, EINVAL); |
826 | PARAMETER_ASSERT(!file->isInput(), -1, EINVAL); |
827 | |
828 | try { |
829 | file->getOutputStream().flush(); |
830 | return 0; |
831 | } catch (const std::bad_alloc & e) { |
832 | SetErrorMessage("Out of memory" ); |
833 | errno = ENOMEM; |
834 | } catch (...) { |
835 | SetLastException(Hdfs::current_exception()); |
836 | handleException(Hdfs::current_exception()); |
837 | } |
838 | |
839 | return -1; |
840 | } |
841 | |
842 | int hdfsSync(hdfsFS fs, hdfsFile file) { |
843 | PARAMETER_ASSERT(fs && file && file, -1, EINVAL); |
844 | PARAMETER_ASSERT(!file->isInput(), -1, EINVAL); |
845 | |
846 | try { |
847 | file->getOutputStream().sync(); |
848 | return 0; |
849 | } catch (const std::bad_alloc & e) { |
850 | SetErrorMessage("Out of memory" ); |
851 | errno = ENOMEM; |
852 | } catch (...) { |
853 | SetLastException(Hdfs::current_exception()); |
854 | handleException(Hdfs::current_exception()); |
855 | } |
856 | |
857 | return -1; |
858 | } |
859 | |
860 | int hdfsAvailable(hdfsFS fs, hdfsFile file) { |
861 | PARAMETER_ASSERT(fs && file && file, -1, EINVAL); |
862 | PARAMETER_ASSERT(file->isInput(), -1, EINVAL); |
863 | |
864 | try { |
865 | int max = std::numeric_limits<int>::max(); |
866 | int64_t retval = file->getInputStream().available(); |
867 | return retval < max ? retval : max; |
868 | } catch (const std::bad_alloc & e) { |
869 | SetErrorMessage("Out of memory" ); |
870 | errno = ENOMEM; |
871 | } catch (...) { |
872 | SetLastException(Hdfs::current_exception()); |
873 | handleException(Hdfs::current_exception()); |
874 | } |
875 | |
876 | return -1; |
877 | } |
878 | |
879 | int hdfsCopy(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst) { |
880 | PARAMETER_ASSERT(srcFS && dstFS, -1, EINVAL); |
881 | PARAMETER_ASSERT(src && strlen(src) > 0, -1, EINVAL); |
882 | PARAMETER_ASSERT(dst && strlen(dst) > 0, -1, EINVAL); |
883 | |
884 | errno = ENOTSUP; |
885 | return -1; |
886 | } |
887 | |
888 | int hdfsMove(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst) { |
889 | PARAMETER_ASSERT(srcFS && dstFS, -1, EINVAL); |
890 | PARAMETER_ASSERT(src && strlen(src) > 0, -1, EINVAL); |
891 | PARAMETER_ASSERT(dst && strlen(dst) > 0, -1, EINVAL); |
892 | |
893 | errno = ENOTSUP; |
894 | return -1; |
895 | } |
896 | |
897 | int hdfsDelete(hdfsFS fs, const char * path, int recursive) { |
898 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL); |
899 | |
900 | try { |
901 | return fs->getFilesystem().deletePath(path, recursive) ? 0 : -1; |
902 | } catch (const std::bad_alloc & e) { |
903 | SetErrorMessage("Out of memory" ); |
904 | errno = ENOMEM; |
905 | } catch (...) { |
906 | SetLastException(Hdfs::current_exception()); |
907 | handleException(Hdfs::current_exception()); |
908 | } |
909 | |
910 | return -1; |
911 | } |
912 | |
913 | int hdfsRename(hdfsFS fs, const char * oldPath, const char * newPath) { |
914 | PARAMETER_ASSERT(fs && oldPath && strlen(oldPath) > 0, -1, EINVAL); |
915 | PARAMETER_ASSERT(newPath && strlen(newPath) > 0, -1, EINVAL); |
916 | |
917 | try { |
918 | return fs->getFilesystem().rename(oldPath, newPath) ? 0 : -1; |
919 | } catch (const std::bad_alloc & e) { |
920 | SetErrorMessage("Out of memory" ); |
921 | errno = ENOMEM; |
922 | } catch (...) { |
923 | SetLastException(Hdfs::current_exception()); |
924 | handleException(Hdfs::current_exception()); |
925 | } |
926 | |
927 | return -1; |
928 | } |
929 | |
930 | char * hdfsGetWorkingDirectory(hdfsFS fs, char * buffer, size_t bufferSize) { |
931 | PARAMETER_ASSERT(fs && buffer && bufferSize > 0, NULL, EINVAL); |
932 | |
933 | try { |
934 | std::string retval = fs->getFilesystem().getWorkingDirectory(); |
935 | PARAMETER_ASSERT(retval.length() + 1 <= bufferSize, NULL, ENOMEM); |
936 | strncpy(buffer, retval.c_str(), bufferSize); |
937 | return buffer; |
938 | } catch (const std::bad_alloc & e) { |
939 | SetErrorMessage("Out of memory" ); |
940 | errno = ENOMEM; |
941 | } catch (...) { |
942 | SetLastException(Hdfs::current_exception()); |
943 | handleException(Hdfs::current_exception()); |
944 | } |
945 | |
946 | return NULL; |
947 | } |
948 | |
949 | int hdfsSetWorkingDirectory(hdfsFS fs, const char * path) { |
950 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL); |
951 | |
952 | try { |
953 | fs->getFilesystem().setWorkingDirectory(path); |
954 | return 0; |
955 | } catch (const std::bad_alloc & e) { |
956 | SetErrorMessage("Out of memory" ); |
957 | errno = ENOMEM; |
958 | } catch (...) { |
959 | SetLastException(Hdfs::current_exception()); |
960 | handleException(Hdfs::current_exception()); |
961 | } |
962 | |
963 | return -1; |
964 | } |
965 | |
966 | int hdfsCreateDirectory(hdfsFS fs, const char * path) { |
967 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL); |
968 | |
969 | try { |
970 | return fs->getFilesystem().mkdirs(path, 0755) ? 0 : -1; |
971 | } catch (const std::bad_alloc & e) { |
972 | SetErrorMessage("Out of memory" ); |
973 | errno = ENOMEM; |
974 | } catch (...) { |
975 | SetLastException(Hdfs::current_exception()); |
976 | handleException(Hdfs::current_exception()); |
977 | } |
978 | |
979 | return -1; |
980 | } |
981 | |
982 | int hdfsSetReplication(hdfsFS fs, const char * path, int16_t replication) { |
983 | PARAMETER_ASSERT(fs && path && strlen(path) > 0 && replication > 0, -1, EINVAL); |
984 | |
985 | try { |
986 | return fs->getFilesystem().setReplication(path, replication) ? 0 : -1; |
987 | } catch (const std::bad_alloc & e) { |
988 | SetErrorMessage("Out of memory" ); |
989 | errno = ENOMEM; |
990 | } catch (...) { |
991 | SetLastException(Hdfs::current_exception()); |
992 | handleException(Hdfs::current_exception()); |
993 | } |
994 | |
995 | return -1; |
996 | } |
997 | |
/**
 * Fill an array of hdfsFileInfo structs from FileStatus objects.  The
 * string fields are allocated with Strdup (new[]) and must be released
 * with hdfsFreeFileInfo.  infos must hold at least status.size() entries.
 */
static void ConstructHdfsFileInfo(hdfsFileInfo * infos,
                                  const std::vector<Hdfs::FileStatus> & status) {
    size_t size = status.size();

    for (size_t i = 0; i < size; ++i) {
        infos[i].mBlockSize = status[i].getBlockSize();
        infos[i].mGroup = Strdup(status[i].getGroup());
        infos[i].mKind =
            status[i].isDirectory() ?
            kObjectKindDirectory : kObjectKindFile;
        // HDFS reports times in milliseconds; the C API uses seconds.
        infos[i].mLastAccess = status[i].getAccessTime() / 1000;
        infos[i].mLastMod = status[i].getModificationTime() / 1000;
        infos[i].mName = Strdup(status[i].getPath());
        infos[i].mOwner = Strdup(status[i].getOwner());
        infos[i].mPermissions = status[i].getPermission().toShort();
        infos[i].mReplication = status[i].getReplication();
        infos[i].mSize = status[i].getLength();
    }
}
1017 | |
1018 | hdfsFileInfo * hdfsListDirectory(hdfsFS fs, const char * path, |
1019 | int * numEntries) { |
1020 | PARAMETER_ASSERT(fs && path && strlen(path) > 0 && numEntries, NULL, EINVAL); |
1021 | hdfsFileInfo * retval = NULL; |
1022 | int size = 0; |
1023 | |
1024 | try { |
1025 | std::vector<Hdfs::FileStatus> status = |
1026 | fs->getFilesystem().listAllDirectoryItems(path); |
1027 | size = status.size(); |
1028 | retval = new hdfsFileInfo[size]; |
1029 | memset(retval, 0, sizeof(hdfsFileInfo) * size); |
1030 | ConstructHdfsFileInfo(&retval[0], status); |
1031 | *numEntries = size; |
1032 | return retval; |
1033 | } catch (const std::bad_alloc & e) { |
1034 | SetErrorMessage("Out of memory" ); |
1035 | hdfsFreeFileInfo(retval, size); |
1036 | errno = ENOMEM; |
1037 | } catch (...) { |
1038 | SetLastException(Hdfs::current_exception()); |
1039 | hdfsFreeFileInfo(retval, size); |
1040 | handleException(Hdfs::current_exception()); |
1041 | } |
1042 | |
1043 | return NULL; |
1044 | } |
1045 | |
1046 | hdfsFileInfo * hdfsGetPathInfo(hdfsFS fs, const char * path) { |
1047 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, NULL, EINVAL); |
1048 | hdfsFileInfo * retval = NULL; |
1049 | |
1050 | try { |
1051 | retval = new hdfsFileInfo[1]; |
1052 | memset(retval, 0, sizeof(hdfsFileInfo)); |
1053 | std::vector<Hdfs::FileStatus> status(1); |
1054 | status[0] = fs->getFilesystem().getFileStatus(path); |
1055 | ConstructHdfsFileInfo(retval, status); |
1056 | return retval; |
1057 | } catch (const std::bad_alloc & e) { |
1058 | SetErrorMessage("Out of memory" ); |
1059 | hdfsFreeFileInfo(retval, 1); |
1060 | errno = ENOMEM; |
1061 | } catch (...) { |
1062 | SetLastException(Hdfs::current_exception()); |
1063 | hdfsFreeFileInfo(retval, 1); |
1064 | handleException(Hdfs::current_exception()); |
1065 | } |
1066 | |
1067 | return NULL; |
1068 | } |
1069 | |
1070 | void hdfsFreeFileInfo(hdfsFileInfo * infos, int numEntries) { |
1071 | for (int i = 0; infos != NULL && i < numEntries; ++i) { |
1072 | delete [] infos[i].mGroup; |
1073 | delete [] infos[i].mName; |
1074 | delete [] infos[i].mOwner; |
1075 | } |
1076 | |
1077 | delete[] infos; |
1078 | } |
1079 | |
1080 | char ***hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, |
1081 | tOffset length) { |
1082 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, NULL, EINVAL); |
1083 | PARAMETER_ASSERT(start >= 0 && length > 0, NULL, EINVAL); |
1084 | char ***retval = NULL; |
1085 | |
1086 | try { |
1087 | std::vector<Hdfs::BlockLocation> bls = |
1088 | fs->getFilesystem().getFileBlockLocations(path, start, length); |
1089 | retval = new char **[bls.size() + 1]; |
1090 | memset(retval, 0, sizeof(char **) * (bls.size() + 1)); |
1091 | |
1092 | for (size_t i = 0; i < bls.size(); ++i) { |
1093 | const std::vector<std::string> &hosts = bls[i].getHosts(); |
1094 | retval[i] = new char *[hosts.size() + 1]; |
1095 | memset(retval[i], 0, sizeof(char *) * (hosts.size() + 1)); |
1096 | |
1097 | for (size_t j = 0; j < hosts.size(); ++j) { |
1098 | retval[i][j] = Strdup(hosts[j].c_str()); |
1099 | } |
1100 | } |
1101 | |
1102 | return retval; |
1103 | } catch (const std::bad_alloc &e) { |
1104 | SetErrorMessage("Out of memory" ); |
1105 | hdfsFreeHosts(retval); |
1106 | errno = ENOMEM; |
1107 | } catch (...) { |
1108 | SetLastException(Hdfs::current_exception()); |
1109 | hdfsFreeHosts(retval); |
1110 | handleException(Hdfs::current_exception()); |
1111 | } |
1112 | |
1113 | return NULL; |
1114 | } |
1115 | |
/**
 * Release the nested host-name arrays returned by hdfsGetHosts.
 * @param blockHosts the NULL-terminated array of NULL-terminated
 *        host-name arrays; NULL is ignored.
 */
void hdfsFreeHosts(char ***blockHosts) {
    if (blockHosts == NULL) {
        return;
    }

    // Both levels are NULL-terminated and were allocated with new[].
    for (char ***block = blockHosts; *block != NULL; ++block) {
        for (char **host = *block; *host != NULL; ++host) {
            delete[] *host;
        }

        delete[] *block;
    }

    delete[] blockHosts;
}
1131 | |
1132 | tOffset hdfsGetDefaultBlockSize(hdfsFS fs) { |
1133 | PARAMETER_ASSERT(fs != NULL, -1, EINVAL); |
1134 | |
1135 | try { |
1136 | return fs->getFilesystem().getDefaultBlockSize(); |
1137 | } catch (const std::bad_alloc & e) { |
1138 | SetErrorMessage("Out of memory" ); |
1139 | errno = ENOMEM; |
1140 | } catch (...) { |
1141 | SetLastException(Hdfs::current_exception()); |
1142 | handleException(Hdfs::current_exception()); |
1143 | } |
1144 | |
1145 | return -1; |
1146 | } |
1147 | |
1148 | tOffset hdfsGetCapacity(hdfsFS fs) { |
1149 | PARAMETER_ASSERT(fs != NULL, -1, EINVAL); |
1150 | |
1151 | try { |
1152 | Hdfs::FileSystemStats stat = fs->getFilesystem().getStats(); |
1153 | return stat.getCapacity(); |
1154 | } catch (const std::bad_alloc & e) { |
1155 | SetErrorMessage("Out of memory" ); |
1156 | errno = ENOMEM; |
1157 | } catch (...) { |
1158 | SetLastException(Hdfs::current_exception()); |
1159 | handleException(Hdfs::current_exception()); |
1160 | } |
1161 | |
1162 | return -1; |
1163 | } |
1164 | |
1165 | tOffset hdfsGetUsed(hdfsFS fs) { |
1166 | PARAMETER_ASSERT(fs != NULL, -1, EINVAL); |
1167 | |
1168 | try { |
1169 | Hdfs::FileSystemStats stat = fs->getFilesystem().getStats(); |
1170 | return stat.getUsed(); |
1171 | } catch (const std::bad_alloc & e) { |
1172 | SetErrorMessage("Out of memory" ); |
1173 | errno = ENOMEM; |
1174 | } catch (...) { |
1175 | SetLastException(Hdfs::current_exception()); |
1176 | handleException(Hdfs::current_exception()); |
1177 | } |
1178 | |
1179 | return -1; |
1180 | } |
1181 | |
1182 | int hdfsChown(hdfsFS fs, const char * path, const char * owner, |
1183 | const char * group) { |
1184 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL); |
1185 | PARAMETER_ASSERT((owner && strlen(owner) > 0) || (group && strlen(group) > 0), -1, EINVAL); |
1186 | |
1187 | try { |
1188 | fs->getFilesystem().setOwner(path, owner, group); |
1189 | return 0; |
1190 | } catch (const std::bad_alloc & e) { |
1191 | SetErrorMessage("Out of memory" ); |
1192 | errno = ENOMEM; |
1193 | } catch (...) { |
1194 | SetLastException(Hdfs::current_exception()); |
1195 | handleException(Hdfs::current_exception()); |
1196 | } |
1197 | |
1198 | return -1; |
1199 | } |
1200 | |
1201 | int hdfsChmod(hdfsFS fs, const char * path, short mode) { |
1202 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL); |
1203 | |
1204 | try { |
1205 | fs->getFilesystem().setPermission(path, mode); |
1206 | return 0; |
1207 | } catch (const std::bad_alloc & e) { |
1208 | SetErrorMessage("Out of memory" ); |
1209 | errno = ENOMEM; |
1210 | } catch (...) { |
1211 | SetLastException(Hdfs::current_exception()); |
1212 | handleException(Hdfs::current_exception()); |
1213 | } |
1214 | |
1215 | return -1; |
1216 | } |
1217 | |
1218 | int hdfsUtime(hdfsFS fs, const char * path, tTime mtime, tTime atime) { |
1219 | PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL); |
1220 | |
1221 | try { |
1222 | fs->getFilesystem().setTimes(path, mtime, atime); |
1223 | return 0; |
1224 | } catch (const std::bad_alloc & e) { |
1225 | SetErrorMessage("Out of memory" ); |
1226 | errno = ENOMEM; |
1227 | } catch (...) { |
1228 | SetLastException(Hdfs::current_exception()); |
1229 | handleException(Hdfs::current_exception()); |
1230 | } |
1231 | |
1232 | return -1; |
1233 | } |
1234 | |
1235 | int hdfsTruncate(hdfsFS fs, const char * path, tOffset pos, int * shouldWait) { |
1236 | PARAMETER_ASSERT(fs && path && strlen(path) > 0 && pos >= 0 && shouldWait, -1, EINVAL); |
1237 | |
1238 | try { |
1239 | *shouldWait = !fs->getFilesystem().truncate(path, pos); |
1240 | return 0; |
1241 | } catch (const std::bad_alloc & e) { |
1242 | SetErrorMessage("Out of memory" ); |
1243 | errno = ENOMEM; |
1244 | } catch (...) { |
1245 | SetLastException(Hdfs::current_exception()); |
1246 | handleException(Hdfs::current_exception()); |
1247 | } |
1248 | |
1249 | return -1; |
1250 | } |
1251 | |
1252 | char * hdfsGetDelegationToken(hdfsFS fs, const char * renewer) { |
1253 | PARAMETER_ASSERT(fs && renewer && strlen(renewer) > 0, NULL, EINVAL); |
1254 | |
1255 | try { |
1256 | std::string token = fs->getFilesystem().getDelegationToken(renewer); |
1257 | return Strdup(token.c_str()); |
1258 | } catch (const std::bad_alloc & e) { |
1259 | SetErrorMessage("Out of memory" ); |
1260 | errno = ENOMEM; |
1261 | } catch (...) { |
1262 | SetLastException(Hdfs::current_exception()); |
1263 | handleException(Hdfs::current_exception()); |
1264 | } |
1265 | |
1266 | return NULL; |
1267 | } |
1268 | |
/**
 * Release a delegation token returned by hdfsGetDelegationToken.
 * @param token the token string to free; NULL is ignored.
 */
void hdfsFreeDelegationToken(char * token) {
    if (!token) {
        return;
    }

    // The token was allocated with new[] (via Strdup), so it must be
    // released with delete[]; a scalar delete here is undefined
    // behavior (mismatched new[]/delete).
    delete [] token;
}
1276 | |
1277 | int64_t hdfsRenewDelegationToken(hdfsFS fs, const char * token) { |
1278 | PARAMETER_ASSERT(fs && token && strlen(token) > 0, -1, EINVAL); |
1279 | |
1280 | try { |
1281 | return fs->getFilesystem().renewDelegationToken(token); |
1282 | } catch (const std::bad_alloc & e) { |
1283 | SetErrorMessage("Out of memory" ); |
1284 | errno = ENOMEM; |
1285 | } catch (...) { |
1286 | SetLastException(Hdfs::current_exception()); |
1287 | handleException(Hdfs::current_exception()); |
1288 | } |
1289 | |
1290 | return -1; |
1291 | } |
1292 | |
1293 | int hdfsCancelDelegationToken(hdfsFS fs, const char * token) { |
1294 | PARAMETER_ASSERT(fs && token && strlen(token) > 0, -1, EINVAL); |
1295 | |
1296 | try { |
1297 | fs->getFilesystem().cancelDelegationToken(token); |
1298 | return 0; |
1299 | } catch (const std::bad_alloc & e) { |
1300 | SetErrorMessage("Out of memory" ); |
1301 | errno = ENOMEM; |
1302 | } catch (...) { |
1303 | SetLastException(Hdfs::current_exception()); |
1304 | handleException(Hdfs::current_exception()); |
1305 | } |
1306 | |
1307 | return -1; |
1308 | } |
1309 | |
1310 | static Namenode * hdfsGetConfiguredNamenodesInternal(const char * nameservice, |
1311 | int * size, shared_ptr<Config> conf) { |
1312 | std::vector<NamenodeInfo> namenodeInfos = NamenodeInfo::GetHANamenodeInfo( |
1313 | nameservice, *conf); |
1314 | |
1315 | if (namenodeInfos.empty()) { |
1316 | return NULL; |
1317 | } |
1318 | |
1319 | Namenode * retval = new Namenode[namenodeInfos.size()]; |
1320 | |
1321 | for (size_t i = 0; i < namenodeInfos.size(); ++i) { |
1322 | if (namenodeInfos[i].getHttpAddr().empty()) { |
1323 | retval[i].http_addr = NULL; |
1324 | } else { |
1325 | retval[i].http_addr = Strdup(namenodeInfos[i].getHttpAddr().c_str()); |
1326 | } |
1327 | |
1328 | if (namenodeInfos[i].getRpcAddr().empty()) { |
1329 | retval[i].rpc_addr = NULL; |
1330 | } else { |
1331 | retval[i].rpc_addr = Strdup(namenodeInfos[i].getRpcAddr().c_str()); |
1332 | } |
1333 | } |
1334 | |
1335 | *size = namenodeInfos.size(); |
1336 | return retval; |
1337 | } |
1338 | |
1339 | Namenode * hdfsGetHANamenodes(const char * nameservice, int * size) { |
1340 | PARAMETER_ASSERT(nameservice && size, NULL, EINVAL); |
1341 | |
1342 | try { |
1343 | return hdfsGetConfiguredNamenodesInternal(nameservice, size, |
1344 | DefaultConfig().getConfig()); |
1345 | } catch (const std::bad_alloc & e) { |
1346 | SetErrorMessage("Out of memory" ); |
1347 | errno = ENOMEM; |
1348 | } catch (...) { |
1349 | SetLastException(Hdfs::current_exception()); |
1350 | handleException(Hdfs::current_exception()); |
1351 | } |
1352 | |
1353 | return NULL; |
1354 | } |
1355 | |
1356 | Namenode * hdfsGetHANamenodesWithConfig(const char * conf, |
1357 | const char * nameservice, int * size) { |
1358 | PARAMETER_ASSERT(conf && strlen(conf) > 0 && nameservice && size, NULL, EINVAL); |
1359 | |
1360 | try { |
1361 | return hdfsGetConfiguredNamenodesInternal(nameservice, size, |
1362 | DefaultConfig(conf).getConfig()); |
1363 | } catch (const std::bad_alloc & e) { |
1364 | SetErrorMessage("Out of memory" ); |
1365 | errno = ENOMEM; |
1366 | } catch (...) { |
1367 | SetLastException(Hdfs::current_exception()); |
1368 | handleException(Hdfs::current_exception()); |
1369 | } |
1370 | |
1371 | return NULL; |
1372 | } |
1373 | |
1374 | void hdfsFreeNamenodeInformation(Namenode * namenodes, int size) { |
1375 | if (namenodes && size > 0) { |
1376 | for (int i = 0; i < size; ++i) { |
1377 | delete[] namenodes[i].http_addr; |
1378 | delete[] namenodes[i].rpc_addr; |
1379 | } |
1380 | } |
1381 | |
1382 | delete[] namenodes; |
1383 | } |
1384 | |
1385 | static void ConstructFileBlockLocation(Hdfs::BlockLocation & bl, BlockLocation * target) { |
1386 | memset(target, 0, sizeof(BlockLocation)); |
1387 | target->corrupt = bl.isCorrupt(); |
1388 | target->numOfNodes = bl.getNames().size(); |
1389 | target->length = bl.getLength(); |
1390 | target->offset = bl.getOffset(); |
1391 | target->hosts = new char *[target->numOfNodes]; |
1392 | memset(target->hosts, 0, sizeof(char) * target->numOfNodes); |
1393 | target->names = new char *[target->numOfNodes]; |
1394 | memset(target->names, 0, sizeof(char) * target->numOfNodes); |
1395 | target->topologyPaths = new char *[target->numOfNodes]; |
1396 | memset(target->topologyPaths, 0, sizeof(char) * target->numOfNodes); |
1397 | const std::vector<std::string> & hosts = bl.getHosts(); |
1398 | const std::vector<std::string> & names = bl.getNames(); |
1399 | const std::vector<std::string> & topologyPaths = bl.getTopologyPaths(); |
1400 | |
1401 | for (int i = 0; i < target->numOfNodes; ++i) { |
1402 | target->hosts[i] = Strdup(hosts[i].c_str()); |
1403 | target->names[i] = Strdup(names[i].c_str()); |
1404 | target->topologyPaths[i] = Strdup(topologyPaths[i].c_str()); |
1405 | } |
1406 | } |
1407 | |
1408 | BlockLocation * hdfsGetFileBlockLocations(hdfsFS fs, const char * path, |
1409 | tOffset start, tOffset length, int * numOfBlock) { |
1410 | PARAMETER_ASSERT(fs && numOfBlock && path && strlen(path), NULL, EINVAL); |
1411 | PARAMETER_ASSERT(start >= 0 && length > 0, NULL, EINVAL); |
1412 | BlockLocation * retval = NULL; |
1413 | int size = 0; |
1414 | |
1415 | try { |
1416 | std::vector<Hdfs::BlockLocation> locations = fs->getFilesystem().getFileBlockLocations(path, start, length); |
1417 | size = locations.size(); |
1418 | retval = new BlockLocation[size]; |
1419 | |
1420 | for (int i = 0; i < size; ++i) { |
1421 | ConstructFileBlockLocation(locations[i], &retval[i]); |
1422 | } |
1423 | |
1424 | *numOfBlock = size; |
1425 | return retval; |
1426 | } catch (const std::bad_alloc & e) { |
1427 | SetErrorMessage("Out of memory" ); |
1428 | hdfsFreeFileBlockLocations(retval, size); |
1429 | errno = ENOMEM; |
1430 | } catch (...) { |
1431 | SetLastException(Hdfs::current_exception()); |
1432 | hdfsFreeFileBlockLocations(retval, size); |
1433 | handleException(Hdfs::current_exception()); |
1434 | } |
1435 | |
1436 | return NULL; |
1437 | } |
1438 | |
1439 | void hdfsFreeFileBlockLocations(BlockLocation * locations, int numOfBlock) { |
1440 | if (!locations) { |
1441 | return; |
1442 | } |
1443 | |
1444 | for (int i = 0; i < numOfBlock; ++i) { |
1445 | for (int j = 0; j < locations[i].numOfNodes; ++j) { |
1446 | delete [] locations[i].hosts[j]; |
1447 | delete [] locations[i].names[j]; |
1448 | delete [] locations[i].topologyPaths[j]; |
1449 | } |
1450 | |
1451 | delete [] locations[i].hosts; |
1452 | delete [] locations[i].names; |
1453 | delete [] locations[i].topologyPaths; |
1454 | } |
1455 | |
1456 | delete [] locations; |
1457 | } |
1458 | |
1459 | #ifdef __cplusplus |
1460 | } |
1461 | #endif |
1462 | |