Add an async helper function to connect an RPC context to a program/version
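
The helper first dials the server's portmapper, verifies it responds to a
NULL call, queries the TCP port of the requested program/version, and then
reconnects the context to that port before invoking the caller's callback.

Example usage (a minimal sketch: MOUNT_PROGRAM and MOUNT_V3 are assumed to
be available from the mount protocol headers, and driving the event loop
via rpc_get_fd()/rpc_which_events()/rpc_service() is left to the caller):

    static void connected_cb(struct rpc_context *rpc, int status,
                             void *data, void *private_data)
    {
            if (status != RPC_STATUS_SUCCESS) {
                    fprintf(stderr, "connect failed: %s\n",
                            data != NULL ? (char *)data : "unknown error");
                    return;
            }
            /* rpc is now connected and bound to the requested
             * program/version */
    }

    if (rpc_connect_program_async(rpc, "10.0.0.1", MOUNT_PROGRAM, MOUNT_V3,
                                  connected_cb, NULL) != 0) {
            fprintf(stderr, "%s\n", rpc_get_error(rpc));
    }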
diff --git a/lib/libnfs.c b/lib/libnfs.c
index 97663ac658ff2fe4d2285b7833c2c5bb8c79482b..8a2f16fcc21419dcae10361cec7013722396fe2c 100644
--- a/lib/libnfs.c
+++ b/lib/libnfs.c
 
 #ifdef WIN32
 #include "win32_compat.h"
-#else
-#include <strings.h>
+#endif
+
+#ifdef HAVE_UTIME_H
 #include <utime.h>
-#include <netinet/in.h>
-#endif /*WIN32*/
+#endif
 
 #ifdef ANDROID
 #define statvfs statfs
 #include <sys/statvfs.h>
 #endif
 
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+
 #include <stdio.h>
 #include <stdarg.h>
 #include <stdlib.h>
@@ -211,6 +219,155 @@ void nfs_destroy_context(struct nfs_context *nfs)
        free(nfs);
 }
 
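+/* State carried through the chain of connect callbacks below. The four
+ * stages run 1 -> 4 at runtime but are defined in reverse order so that
+ * each stage can reference its successor.
+ */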
+struct rpc_cb_data {
+       char *server;
+       uint32_t program;
+       uint32_t version;
+
+       rpc_cb cb;
+       void *private_data;
+};
+
+static void free_rpc_cb_data(struct rpc_cb_data *data)
+{
+       free(data->server);
+       data->server = NULL;
+       free(data);
+}
+
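+/* Stage 4: the reconnect to the program's own port has finished; report
+ * the final status to the application.
+ */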
+static void rpc_connect_program_4_cb(struct rpc_context *rpc, int status, void *command_data, void *private_data)
+{
+       struct rpc_cb_data *data = private_data;
+
+       assert(rpc->magic == RPC_CONTEXT_MAGIC);
+
+       /* Don't want any more callbacks even if the socket is closed */
+       rpc->connect_cb = NULL;
+
+       if (status == RPC_STATUS_ERROR) {
+               data->cb(rpc, status, command_data, data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+       if (status == RPC_STATUS_CANCEL) {
+               data->cb(rpc, status, "Command was cancelled", data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+
+       data->cb(rpc, status, NULL, data->private_data);
+       free_rpc_cb_data(data);
+}
+
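+/* Stage 3: the portmapper returned the port the program is registered on;
+ * drop the portmapper connection and reconnect to that port.
+ */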
+static void rpc_connect_program_3_cb(struct rpc_context *rpc, int status, void *command_data, void *private_data)
+{
+       struct rpc_cb_data *data = private_data;
+       uint32_t rpc_port;
+
+       assert(rpc->magic == RPC_CONTEXT_MAGIC);
+
+       if (status == RPC_STATUS_ERROR) {
+               data->cb(rpc, status, command_data, data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+       if (status == RPC_STATUS_CANCEL) {
+               data->cb(rpc, status, "Command was cancelled", data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+
+       rpc_port = *(uint32_t *)command_data;
+       if (rpc_port == 0) {
+               rpc_set_error(rpc, "RPC error. Program is not available on %s", data->server);
+               data->cb(rpc, RPC_STATUS_ERROR, rpc_get_error(rpc), data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+
+       rpc_disconnect(rpc, "normal disconnect");
+       if (rpc_connect_async(rpc, data->server, rpc_port, rpc_connect_program_4_cb, data) != 0) {
+               data->cb(rpc, RPC_STATUS_ERROR, rpc_get_error(rpc), data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+}
+
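+/* Stage 2: the portmapper answered the NULL ping; ask which TCP port the
+ * requested program/version is registered on.
+ */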
+static void rpc_connect_program_2_cb(struct rpc_context *rpc, int status, void *command_data, void *private_data)
+{
+       struct rpc_cb_data *data = private_data;
+
+       assert(rpc->magic == RPC_CONTEXT_MAGIC);
+
+       if (status == RPC_STATUS_ERROR) {
+               data->cb(rpc, status, command_data, data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+       if (status == RPC_STATUS_CANCEL) {
+               data->cb(rpc, status, "Command was cancelled", data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+
+       if (rpc_pmap_getport_async(rpc, data->program, data->version, IPPROTO_TCP, rpc_connect_program_3_cb, data) != 0) {
+               data->cb(rpc, RPC_STATUS_ERROR, rpc_get_error(rpc), data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+}
+
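+/* Stage 1: the TCP connection to the portmapper is up; ping it with a
+ * NULL call to verify that it is responding.
+ */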
+static void rpc_connect_program_1_cb(struct rpc_context *rpc, int status, void *command_data, void *private_data)
+{
+       struct rpc_cb_data *data = private_data;
+
+       assert(rpc->magic == RPC_CONTEXT_MAGIC);
+
+       /* Don't want any more callbacks even if the socket is closed */
+       rpc->connect_cb = NULL;
+
+       if (status == RPC_STATUS_ERROR) {
+               data->cb(rpc, status, command_data, data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+       if (status == RPC_STATUS_CANCEL) {
+               data->cb(rpc, status, "Command was cancelled", data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+
+       if (rpc_pmap_null_async(rpc, rpc_connect_program_2_cb, data) != 0) {
+               data->cb(rpc, RPC_STATUS_ERROR, rpc_get_error(rpc), data->private_data);
+               free_rpc_cb_data(data);
+               return;
+       }
+}
+
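+/* Connect a context to a given program/version on a server, looking the
+ * port up via the portmapper. cb is invoked once the connection is fully
+ * established or has failed.
+ */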
+int rpc_connect_program_async(struct rpc_context *rpc, char *server, int program, int version, rpc_cb cb, void *private_data)
+{
+       struct rpc_cb_data *data;
+
+       data = malloc(sizeof(struct rpc_cb_data));
+       if (data == NULL) {
+               return -1;
+       }
+       memset(data, 0, sizeof(struct rpc_cb_data));
+       data->server       = strdup(server);
+       if (data->server == NULL) {
+               rpc_set_error(rpc, "Out of memory. Failed to allocate server string");
+               free(data);
+               return -1;
+       }
+       data->program      = program;
+       data->version      = version;
+
+       data->cb           = cb;
+       data->private_data = private_data;
+
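+       /* every lookup starts at the portmapper, which always listens on port 111 */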
+       if (rpc_connect_async(rpc, server, 111, rpc_connect_program_1_cb, data) != 0) {
+               rpc_set_error(rpc, "Failed to start connection");
+               free_rpc_cb_data(data);
+               return -1;
+       }
+       return 0;
+}
+
 void free_nfs_cb_data(struct nfs_cb_data *data)
 {
        if (data->saved_path != NULL) {
@@ -1020,7 +1177,7 @@ int nfs_pread_async(struct nfs_context *nfs, struct nfsfh *nfsfh, uint64_t offse
 
-       /* trying to read more than maximum server read size, we has to chop it up into smaller
-        * reads and collect into a reassembly buffer.
-        * we send all reads in parallell so that performance is still good.
+       /* when trying to read more than the maximum server read size, we have to chop it up
+        * into smaller reads and collect them into a reassembly buffer.
+        * we send all reads in parallel so that performance is still good.
         */
        data->max_offset = offset;
        data->start_offset = offset;
@@ -1200,7 +1357,7 @@ int nfs_pwrite_async(struct nfs_context *nfs, struct nfsfh *nfsfh, uint64_t offs
 
-       /* trying to write more than maximum server write size, we has to chop it up into smaller
-        * chunks.
-        * we send all writes in parallell so that performance is still good.
+       /* when trying to write more than the maximum server write size, we have to chop it up
+        * into smaller chunks.
+        * we send all writes in parallel so that performance is still good.
         */
        data->max_offset = offset;
        data->start_offset = offset;
@@ -2886,9 +3043,6 @@ int nfs_utime_async(struct nfs_context *nfs, const char *path, struct utimbuf *t
 }
 
 
-
-
-
 /*
  * Async access()
  */
@@ -3441,11 +3595,7 @@ uint64_t nfs_get_readmax(struct nfs_context *nfs)
  */
 uint64_t nfs_get_writemax(struct nfs_context *nfs)
 {
-       /* Some ZDR libraries can not marshall PDUs bigger than this */
-        if (nfs->writemax < 32768) {
-               return nfs->writemax;
-       }
-       return 32768;
+       return nfs->writemax;
 }
 
 void nfs_set_error(struct nfs_context *nfs, char *error_string, ...)