
📄 hdfs.c

📁 hadoop: Nutch cluster platform
💻 C
📖 Page 1 of 4
    jint available = -1;
    if (invokeMethod(env, (RetVal*)&available, &jException, INSTANCE, jInputStream,
                "org/apache/hadoop/fs/FSDataInputStream", "available",
                "()I") != 0) {
        fprintf(stderr,
            "Call to org.apache.hadoop.fs.FSDataInputStream::available failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    return available;
}

int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath, deleteSource = false, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;
    jobject jSrcPath = constructNewObjectOfPath(env, src);
    jobject jDstPath = constructNewObjectOfPath(env, dst);
    if (jSrcPath == NULL || jDstPath == NULL) {
        return -1;
    }

    jthrowable jException;
    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration = constructNewObjectOfClass(env, &jException,
            "org/apache/hadoop/conf/Configuration", "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr,
                "Can't construct instance of class org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 0; //Only copy
    jboolean jRetVal = 0;
    if (invokeMethod(env, (RetVal*)&jRetVal, &jException, STATIC,
                NULL, "org/apache/hadoop/fs/FileUtil", "copy",
                "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                jConfiguration) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileUtil::copy failed!\n");
        errno = EINTERNAL;
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}

int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath, deleteSource = true, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;
    jobject jSrcPath = constructNewObjectOfPath(env, src);
    jobject jDstPath = constructNewObjectOfPath(env, dst);
    if (jSrcPath == NULL || jDstPath == NULL) {
        return -1;
    }

    jthrowable jException;
    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration = constructNewObjectOfClass(env, &jException,
            "org/apache/hadoop/conf/Configuration", "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr,
                "Can't construct instance of class org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 1; //Delete src after copy
    jboolean jRetVal = 0;
    if (invokeMethod(env, (RetVal*)&jRetVal, &jException, STATIC,
                NULL, "org/apache/hadoop/fs/FileUtil", "copy",
                "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                jConfiguration) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileUtil::copy(move) failed!\n");
        errno = EINTERNAL;
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}

int hdfsDelete(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  File f = new File(path);
    //  bool retval = fs.delete(f);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Delete the file
    jboolean retval = 1;
    if (invokeMethod(env, (RetVal*)&retval, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "delete",
                "(Lorg/apache/hadoop/fs/Path;)Z", jPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::delete failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (retval) ? 0 : -1;
}

int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
{
    // JAVA EQUIVALENT:
    //  Path old = new Path(oldPath);
    //  Path new = new Path(newPath);
    //  fs.rename(old, new);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create objects of org.apache.hadoop.fs.Path
    jobject jOldPath = constructNewObjectOfPath(env, oldPath);
    jobject jNewPath = constructNewObjectOfPath(env, newPath);
    if (jOldPath == NULL || jNewPath == NULL) {
        return -1;
    }

    //Rename the file
    jboolean retval = 1;
    if (invokeMethod(env, (RetVal*)&retval, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "rename",
                "(Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/Path;)Z",
                jOldPath, jNewPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::rename failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jOldPath);
    destroyLocalReference(env, jNewPath);

    return (retval) ? 0 : -1;
}

int hdfsLock(hdfsFS fs, const char* path, int shared)
{
    // JAVA EQUIVALENT:
    //  Path p = new Path(path);
    //  fs.lock(p);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Parameters
    jobject jFS = (jobject)fs;
    jboolean jb_shared = shared;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Lock the file
    int retval = 0;
    if (invokeMethod(env, NULL, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "lock",
                "(Lorg/apache/hadoop/fs/Path;Z)V", jPath, jb_shared)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::lock failed!\n");
        errno = EINTERNAL;
        retval = -1;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}

int hdfsReleaseLock(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  Path f = new Path(path);
    //  fs.release(f);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Release the lock on the file
    int retval = 0;
    if (invokeMethod(env, NULL, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "release",
                "(Lorg/apache/hadoop/fs/Path;)V", jPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::release failed!\n");
        errno = EINTERNAL;
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}

char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
{
    // JAVA EQUIVALENT:
    //  Path p = fs.getWorkingDirectory();
    //  return p.toString()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jobject jPath = NULL;
    jthrowable jException;

    //FileSystem::getWorkingDirectory()
    if (invokeMethod(env, (RetVal*)&jPath, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "getWorkingDirectory",
                "()Lorg/apache/hadoop/fs/Path;") || jPath == NULL) {
        fprintf(stderr, "Call to FileSystem::getWorkingDirectory failed!\n");
        errno = EINTERNAL;
        return NULL;
    }

    //Path::toString()
    jstring jPathString;
    if (invokeMethod(env, (RetVal*)&jPathString, &jException, INSTANCE, jPath,
                "org/apache/hadoop/fs/Path", "toString", "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to Path::toString failed!\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jPath);
        return NULL;
    }

    //Copy to user-provided buffer
    const char* jPathChars = (*env)->GetStringUTFChars(env, jPathString, NULL);
    strncpy(buffer, jPathChars, bufferSize);

    //Delete unnecessary local references
    (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);
    destroyLocalReference(env, jPath);

    return buffer;
}

int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.setWorkingDirectory(Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;
    int retval = 0;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //FileSystem::setWorkingDirectory()
    if (invokeMethod(env, NULL, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "setWorkingDirectory",
                "(Lorg/apache/hadoop/fs/Path;)V", jPath)) {
        fprintf(stderr, "Call to FileSystem::setWorkingDirectory failed!\n");
        errno = EINTERNAL;
        retval = -1;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}

int hdfsCreateDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.mkdirs(new Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Create the directory
    jboolean jRetVal = 0;
    if (invokeMethod(env, (RetVal*)&jRetVal, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "mkdirs",
                "(Lorg/apache/hadoop/fs/Path;)Z", jPath)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::mkdirs failed!\n");
        errno = EINTERNAL;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jRetVal) ? 0 : -1;
}

char*** hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
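
The functions in this excerpt are thin JNI wrappers: each builds org.apache.hadoop.fs.Path (and, for copy/move, org.apache.hadoop.conf.Configuration) objects, calls the corresponding FileSystem or FileUtil method through invokeMethod, releases the JNI local references, and reports failure by returning -1 (or NULL) with errno set to EINTERNAL. The sketch below shows how a client program might call this API. It is illustrative only: it assumes the hdfsConnect/hdfsDisconnect entry points declared in the accompanying hdfs.h, which are not part of this page of the listing, and the host, port, and paths are placeholders.

/* Sketch only: assumes hdfs.h from the same libhdfs source tree, plus
 * hdfsConnect()/hdfsDisconnect(), which are not shown in this excerpt.
 * Host, port, and paths below are placeholders. */
#include <stdio.h>
#include "hdfs.h"

int main(void)
{
    /* Connect to the filesystem named in the Hadoop configuration. */
    hdfsFS fs = hdfsConnect("default", 0);
    if (fs == NULL) {
        fprintf(stderr, "hdfsConnect failed\n");
        return 1;
    }

    /* Each wrapper returns 0 on success, -1 on failure (errno = EINTERNAL). */
    if (hdfsCreateDirectory(fs, "/tmp/libhdfs-demo") == -1) {
        fprintf(stderr, "hdfsCreateDirectory failed\n");
    }

    /* Copy within one filesystem: srcFS and dstFS may be the same handle. */
    if (hdfsCopy(fs, "/tmp/input.txt", fs, "/tmp/libhdfs-demo/input.txt") == -1) {
        fprintf(stderr, "hdfsCopy failed\n");
    }

    /* Rename (a move within a single filesystem). */
    if (hdfsRename(fs, "/tmp/libhdfs-demo/input.txt",
                   "/tmp/libhdfs-demo/renamed.txt") == -1) {
        fprintf(stderr, "hdfsRename failed\n");
    }

    /* The working directory is copied into a caller-supplied buffer. */
    char cwd[1024];
    if (hdfsGetWorkingDirectory(fs, cwd, sizeof(cwd)) != NULL) {
        printf("working directory: %s\n", cwd);
    }

    /* Delete the copied file. */
    if (hdfsDelete(fs, "/tmp/libhdfs-demo/renamed.txt") == -1) {
        fprintf(stderr, "hdfsDelete failed\n");
    }

    hdfsDisconnect(fs);
    return 0;
}

Such a program would typically be compiled against hdfs.h and linked with libhdfs and the JVM's libjvm, with CLASSPATH pointing at the Hadoop jars so the wrapped Java classes can be loaded at runtime.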
