
📄 hdfs.c

📁 hadoop: Nutch cluster platform
💻 C
📖 Page 1 of 4
char*** hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
    //  fs.getFileCacheHints(new Path(path), start, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    //org.apache.hadoop.fs.FileSystem::getFileCacheHints
    char*** blockHosts = NULL;
    jobjectArray jFileCacheHints;
    if (invokeMethod(env, (RetVal*)&jFileCacheHints, &jException, INSTANCE,
                jFS, "org/apache/hadoop/fs/FileSystem", "getFileCacheHints",
                "(Lorg/apache/hadoop/fs/Path;JJ)[[Ljava/lang/String;", jPath,
                start, length)) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::getFileCacheHints failed!\n");
        errno = EINTERNAL;
        goto done;
    }

    //Figure out no of entries in jFileCacheHints;
    //allocate memory and add NULL at the end
    jsize jNumFileBlocks = (*env)->GetArrayLength(env, jFileCacheHints);
    blockHosts = malloc(sizeof(char**) * (jNumFileBlocks+1));
    if (blockHosts == NULL) {
        errno = ENOMEM;
        goto done;
    }
    blockHosts[jNumFileBlocks] = NULL;
    if (jNumFileBlocks == 0) {
        errno = 0;
        goto done;
    }

    //Now parse each block to get hostnames
    int i = 0;
    for (i = 0; i < jNumFileBlocks; ++i) {
        jobjectArray jFileBlockHosts = (*env)->GetObjectArrayElement(env,
                                                        jFileCacheHints, i);

        //Figure out no of hosts for this block;
        //allocate memory and add NULL at the end
        jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
        blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts+1));
        if (blockHosts[i] == NULL) {
            //Free every hostname and row allocated so far, then the table
            int x, y;
            for (x = 0; x < i; ++x) {
                for (y = 0; blockHosts[x][y] != NULL; ++y) {
                    free(blockHosts[x][y]);
                }
                free(blockHosts[x]);
            }
            free(blockHosts);
            blockHosts = NULL;  //don't return freed memory from 'done'
            errno = ENOMEM;
            goto done;
        }
        blockHosts[i][jNumBlockHosts] = NULL;

        //Now parse each hostname
        int j = 0;
        for (j = 0; j < jNumBlockHosts; ++j) {
            jstring jHost = (*env)->GetObjectArrayElement(env,
                    jFileBlockHosts, j);
            //Copy the UTF chars once and release that same pointer
            const char *hostName = (*env)->GetStringUTFChars(env, jHost, NULL);
            blockHosts[i][j] = strdup(hostName);
            (*env)->ReleaseStringUTFChars(env, jHost, hostName);
        }
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return blockHosts;
}

tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getDefaultBlockSize();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //FileSystem::getDefaultBlockSize()
    tOffset blockSize = -1;
    if (invokeMethod(env, (RetVal*)&blockSize, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "getDefaultBlockSize",
                "()J") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::getDefaultBlockSize failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    return blockSize;
}
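/*
 * Illustrative usage sketch -- not part of the original hdfs.c. It shows how a
 * client might combine hdfsGetDefaultBlockSize() and hdfsGetHosts() to print
 * the hosts serving the first block of a file. hdfsConnect() and
 * hdfsDisconnect() are assumed to exist elsewhere in the same libhdfs API; the
 * caller owns the NULL-terminated host table and must free it string by string.
 */
#ifdef LIBHDFS_USAGE_EXAMPLE
static void exampleBlockHosts(const char *path)
{
    hdfsFS fs = hdfsConnect("default", 0);  //assumed connect call
    if (!fs) {
        return;
    }

    tOffset blockSize = hdfsGetDefaultBlockSize(fs);
    if (blockSize > 0) {
        //Ask which hosts serve the byte range [0, blockSize) of the file
        char ***hosts = hdfsGetHosts(fs, path, 0, blockSize);
        if (hosts) {
            int b, h;
            for (b = 0; hosts[b]; ++b) {
                for (h = 0; hosts[b][h]; ++h) {
                    printf("block %d replica on %s\n", b, hosts[b][h]);
                }
            }
            //Free every hostname, then every row, then the table itself
            for (b = 0; hosts[b]; ++b) {
                for (h = 0; hosts[b][h]; ++h) {
                    free(hosts[b][h]);
                }
                free(hosts[b]);
            }
            free(hosts);
        }
    }
    hdfsDisconnect(fs);  //assumed disconnect call
}
#endif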
tOffset hdfsGetCapacity(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawCapacity();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    if (!((*env)->IsInstanceOf(env, jFS,
                    globalClassReference("org/apache/hadoop/dfs/DistributedFileSystem",
                        env)))) {
        fprintf(stderr,
                "hdfsGetCapacity works only on a DistributedFileSystem!\n");
        return -1;
    }

    //DistributedFileSystem::getRawCapacity()
    tOffset rawCapacity = -1;
    if (invokeMethod(env, (RetVal*)&rawCapacity, &jException, INSTANCE, jFS,
                "org/apache/hadoop/dfs/DistributedFileSystem",
                "getRawCapacity", "()J") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.dfs.DistributedFileSystem::getRawCapacity failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    return rawCapacity;
}

tOffset hdfsGetUsed(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawUsed();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    if (!((*env)->IsInstanceOf(env, jFS,
                    globalClassReference("org/apache/hadoop/dfs/DistributedFileSystem",
                        env)))) {
        fprintf(stderr,
                "hdfsGetUsed works only on a DistributedFileSystem!\n");
        return -1;
    }

    //DistributedFileSystem::getRawUsed()
    tOffset rawUsed = -1;
    if (invokeMethod(env, (RetVal*)&rawUsed, &jException, INSTANCE, jFS,
                "org/apache/hadoop/dfs/DistributedFileSystem", "getRawUsed",
                "()J") != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.dfs.DistributedFileSystem::getRawUsed failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    return rawUsed;
}
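/*
 * Illustrative sketch -- not part of the original hdfs.c. Both calls below
 * return -1 on error (or when the filesystem is not a DistributedFileSystem),
 * so a caller should test the results before dividing. hdfsConnect() and
 * hdfsDisconnect() are assumed to exist in the same libhdfs API.
 */
#ifdef LIBHDFS_USAGE_EXAMPLE
static void exampleClusterSpace(void)
{
    hdfsFS fs = hdfsConnect("default", 0);  //assumed connect call
    if (!fs) {
        return;
    }

    tOffset capacity = hdfsGetCapacity(fs);
    tOffset used = hdfsGetUsed(fs);
    if (capacity > 0 && used >= 0) {
        printf("used %lld of %lld bytes (%.1f%%)\n",
               (long long)used, (long long)capacity,
               100.0 * (double)used / (double)capacity);
    }
    hdfsDisconnect(fs);
}
#endif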
static int getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo)
{
    // JAVA EQUIVALENT:
    //  fs.isDirectory(f)
    //  fs.lastModified() ??
    //  fs.getLength(f)
    //  f.getPath()

    jthrowable jException;

    jboolean jIsDir;
    if (invokeMethod(env, (RetVal*)&jIsDir, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "isDirectory",
                "(Lorg/apache/hadoop/fs/Path;)Z", jPath) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::isDirectory failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    /*
    jlong jModTime = 0;
    if (invokeMethod(env, (RetVal*)&jModTime, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "lastModified",
                "(Lorg/apache/hadoop/fs/Path;)J", jPath) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::lastModified failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    */

    jlong jFileLength = 0;
    if (!jIsDir) {
        if (invokeMethod(env, (RetVal*)&jFileLength, &jException, INSTANCE,
                    jFS, "org/apache/hadoop/fs/FileSystem", "getLength",
                    "(Lorg/apache/hadoop/fs/Path;)J", jPath) != 0) {
            fprintf(stderr,
                    "Call to org.apache.hadoop.fs.FileSystem::getLength failed!\n");
            errno = EINTERNAL;
            return -1;
        }
    }

    jstring jPathName;
    if (invokeMethod(env, (RetVal*)&jPathName, &jException, INSTANCE, jPath,
                "org/apache/hadoop/fs/Path", "toString",
                "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to org.apache.hadoop.fs.Path::toString failed!\n");
        errno = EINTERNAL;
        return -1;
    }

    fileInfo->mKind = (jIsDir ? kObjectKindDirectory : kObjectKindFile);
    //fileInfo->mCreationTime = jModTime;
    fileInfo->mSize = jFileLength;

    //Copy the UTF chars once and release that same pointer
    const char *pathName = (*env)->GetStringUTFChars(env, jPathName, NULL);
    fileInfo->mName = strdup(pathName);
    (*env)->ReleaseStringUTFChars(env, jPathName, pathName);

    return 0;
}

hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
{
    // JAVA EQUIVALENT:
    //  Path p(path);
    //  Path []pathList = fs.listPaths(p)
    //  foreach path in pathList
    //    getFileInfo(path)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *pathList = NULL;

    jobjectArray jPathList;
    if (invokeMethod(env, (RetVal*)&jPathList, &jException, INSTANCE, jFS,
                "org/apache/hadoop/fs/FileSystem", "listPaths",
                "(Lorg/apache/hadoop/fs/Path;)[Lorg/apache/hadoop/fs/Path;",
                jPath) != 0) {
        fprintf(stderr,
                "Call to org.apache.hadoop.fs.FileSystem::listPaths failed!\n");
        errno = EINTERNAL;
        goto done;
    }

    //Figure out no of entries in that directory
    jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
    *numEntries = jPathListSize;
    if (jPathListSize == 0) {
        errno = 0;
        goto done;
    }

    //Allocate memory
    pathList = malloc(sizeof(hdfsFileInfo) * jPathListSize);
    if (pathList == NULL) {
        errno = ENOMEM;
        goto done;
    }

    //Save path information in pathList
    jsize i;
    for (i = 0; i < jPathListSize; ++i) {
        if (getFileInfo(env, jFS, (*env)->GetObjectArrayElement(env,
                        jPathList, i), &pathList[i])) {
            //Free the entries filled in so far, then the block itself
            jsize j;
            for (j = 0; j < i; ++j) {
                free(pathList[j].mName);
            }
            free(pathList);
            pathList = NULL;  //don't return freed memory from 'done'
            errno = EINTERNAL;
            goto done;
        }
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return pathList;
}
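/*
 * Illustrative sketch -- not part of the original hdfs.c. hdfsListDirectory()
 * hands ownership of the returned array to the caller, who releases both the
 * mName strings and the array itself with a single hdfsFreeFileInfo() call.
 * hdfsConnect() and hdfsDisconnect() are assumed to exist in the same API.
 */
#ifdef LIBHDFS_USAGE_EXAMPLE
static void exampleListDirectory(const char *dir)
{
    hdfsFS fs = hdfsConnect("default", 0);  //assumed connect call
    if (!fs) {
        return;
    }

    int numEntries = 0;
    hdfsFileInfo *entries = hdfsListDirectory(fs, dir, &numEntries);
    if (entries) {
        int i;
        for (i = 0; i < numEntries; ++i) {
            printf("%c %12lld %s\n",
                   (entries[i].mKind == kObjectKindDirectory) ? 'd' : '-',
                   (long long)entries[i].mSize, entries[i].mName);
        }
        hdfsFreeFileInfo(entries, numEntries);
    }
    hdfsDisconnect(fs);
}
#endif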
hdfsFileInfo* hdfsGetPathInfo(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  File f(path);
    //  fs.isDirectory(f)
    //  fs.lastModified() ??
    //  fs.getLength(f)
    //  f.getPath()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    jobject jFS = (jobject)fs;
    jthrowable jException;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *fileInfo = malloc(sizeof(hdfsFileInfo));
    if (fileInfo == NULL) {
        errno = ENOMEM;
        goto done;
    }
    bzero(fileInfo, sizeof(hdfsFileInfo));

    if (getFileInfo(env, jFS, jPath, fileInfo)) {
        hdfsFreeFileInfo(fileInfo, 1);
        fileInfo = NULL;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return fileInfo;
}

void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
{
    //Free the mName strings
    int i;
    for (i = 0; i < numEntries; ++i) {
        if (hdfsFileInfo[i].mName) {
            free(hdfsFileInfo[i].mName);
        }
    }

    //Free the entire block
    free(hdfsFileInfo);
}

jobject hdfsConvertToGlobalRef(jobject localRef)
{
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Create the global reference
    jobject globalRef = (*env)->NewGlobalRef(env, localRef);
    if (globalRef == NULL) {
        (*env)->ExceptionDescribe(env);
        return NULL;
    }

    //Destroy the local reference
    (*env)->DeleteLocalRef(env, localRef);

    return globalRef;
}

void hdfsDeleteGlobalRef(jobject globalRef)
{
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();

    //Destroy the global reference
    (*env)->DeleteGlobalRef(env, globalRef);
}

/**
 * vim: ts=4: sw=4: et:
 */
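To round out the listing, here is a minimal standalone sketch of how a client might stat a single path with hdfsGetPathInfo(). It assumes the libhdfs public header is named "hdfs.h" and that hdfsConnect()/hdfsDisconnect() are provided by the same library; everything else appears in the code above.

#include "hdfs.h"   /* assumed name of the libhdfs public header */
#include <stdio.h>

int main(int argc, char **argv)
{
    const char *path = (argc > 1) ? argv[1] : "/";

    hdfsFS fs = hdfsConnect("default", 0);   /* assumed connect call */
    if (!fs) {
        fprintf(stderr, "failed to connect to HDFS\n");
        return 1;
    }

    hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
    if (info) {
        printf("%s: %s, %lld bytes\n", info->mName,
               (info->mKind == kObjectKindDirectory) ? "directory" : "file",
               (long long)info->mSize);
        hdfsFreeFileInfo(info, 1);   /* frees mName and the struct itself */
    } else {
        fprintf(stderr, "hdfsGetPathInfo(%s) failed\n", path);
    }

    hdfsDisconnect(fs);   /* assumed disconnect call */
    return 0;
}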
