ip_vs_ctl.c
			  dest->vfwmark, NIPQUAD(dest->addr), ntohs(dest->port),
			  atomic_read(&dest->refcnt));
		if (dest->addr == daddr &&
		    dest->port == dport &&
		    dest->vfwmark == svc->fwmark &&
		    dest->protocol == svc->protocol &&
		    (svc->fwmark ||
		     (dest->vaddr == svc->addr &&
		      dest->vport == svc->port))) {
			/* HIT */
			return dest;
		}

		/*
		 * Try to purge the destination from trash if not referenced
		 */
		if (atomic_read(&dest->refcnt) == 1) {
			IP_VS_DBG(3, "Removing destination %u/%u.%u.%u.%u:%u "
				  "from trash\n",
				  dest->vfwmark,
				  NIPQUAD(dest->addr), ntohs(dest->port));
			list_del(&dest->n_list);
			ip_vs_dst_reset(dest);
			__ip_vs_unbind_svc(dest);
			kfree(dest);
		}
	}

	return NULL;
}


/*
 * Clean up all the destinations in the trash
 * Called by the ip_vs_control_cleanup()
 *
 * When ip_vs_control_cleanup() is activated by ipvs module exit,
 * the service tables must have been flushed and all the connections
 * are expired, and the refcnt of each destination in the trash must
 * be 1, so we simply release them here.
 */
static void ip_vs_trash_cleanup(void)
{
	struct ip_vs_dest *dest, *nxt;

	list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
		list_del(&dest->n_list);
		ip_vs_dst_reset(dest);
		__ip_vs_unbind_svc(dest);
		kfree(dest);
	}
}


static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
	spin_lock_bh(&stats->lock);
	memset(stats, 0, (char *)&stats->lock - (char *)stats);
	spin_unlock_bh(&stats->lock);
	ip_vs_zero_estimator(stats);
}


/*
 * Update a destination in the given service
 */
static void
__ip_vs_update_dest(struct ip_vs_service *svc,
		    struct ip_vs_dest *dest, struct ip_vs_dest_user *udest)
{
	int conn_flags;

	/* set the weight and the flags */
	atomic_set(&dest->weight, udest->weight);
	conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE;

	/* check if local node and update the flags */
	if (inet_addr_type(udest->addr) == RTN_LOCAL) {
		conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
			| IP_VS_CONN_F_LOCALNODE;
	}

	/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
	if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) {
		conn_flags |= IP_VS_CONN_F_NOOUTPUT;
	} else {
		/*
		 * Put the real service in ip_vs_rtable if not present.
		 * For now only for NAT!
		 */
		write_lock_bh(&__ip_vs_rs_lock);
		ip_vs_rs_hash(dest);
		write_unlock_bh(&__ip_vs_rs_lock);
	}
	atomic_set(&dest->conn_flags, conn_flags);

	/* bind the service */
	if (!dest->svc) {
		__ip_vs_bind_svc(dest, svc);
	} else {
		if (dest->svc != svc) {
			__ip_vs_unbind_svc(dest);
			ip_vs_zero_stats(&dest->stats);
			__ip_vs_bind_svc(dest, svc);
		}
	}

	/* set the dest status flags */
	dest->flags |= IP_VS_DEST_F_AVAILABLE;

	if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold)
		dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
	dest->u_threshold = udest->u_threshold;
	dest->l_threshold = udest->l_threshold;
}


/*
 * Create a destination for the given service
 */
static int
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
	       struct ip_vs_dest **dest_p)
{
	struct ip_vs_dest *dest;
	unsigned atype;

	EnterFunction(2);

	atype = inet_addr_type(udest->addr);
	if (atype != RTN_LOCAL && atype != RTN_UNICAST)
		return -EINVAL;

	dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
	if (dest == NULL) {
		IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
		return -ENOMEM;
	}
	memset(dest, 0, sizeof(struct ip_vs_dest));

	dest->protocol = svc->protocol;
	dest->vaddr = svc->addr;
	dest->vport = svc->port;
	dest->vfwmark = svc->fwmark;
	dest->addr = udest->addr;
	dest->port = udest->port;

	atomic_set(&dest->activeconns, 0);
	atomic_set(&dest->inactconns, 0);
	atomic_set(&dest->persistconns, 0);
	atomic_set(&dest->refcnt, 0);

	INIT_LIST_HEAD(&dest->d_list);
	spin_lock_init(&dest->dst_lock);
	spin_lock_init(&dest->stats.lock);
	__ip_vs_update_dest(svc, dest, udest);
	ip_vs_new_estimator(&dest->stats);

	*dest_p = dest;

	LeaveFunction(2);
	return 0;
}


/*
 * Add a destination into an existing service
 */
static int
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
{
	struct ip_vs_dest *dest;
	__u32 daddr = udest->addr;
	__u16 dport = udest->port;
	int ret;

	EnterFunction(2);

	if (udest->weight < 0) {
		IP_VS_ERR("ip_vs_add_dest(): server weight less than zero\n");
		return -ERANGE;
	}

	if (udest->l_threshold > udest->u_threshold) {
		IP_VS_ERR("ip_vs_add_dest(): lower threshold is higher than "
			  "upper threshold\n");
		return -ERANGE;
	}

	/*
	 * Check if the dest already exists in the list
	 */
	dest = ip_vs_lookup_dest(svc, daddr, dport);
	if (dest != NULL) {
		IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n");
		return -EEXIST;
	}

	/*
	 * Check if the dest already exists in the trash and
	 * is from the same service
	 */
	dest = ip_vs_trash_get_dest(svc, daddr, dport);
	if (dest != NULL) {
		IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, "
			  "refcnt=%d, service %u/%u.%u.%u.%u:%u\n",
			  NIPQUAD(daddr), ntohs(dport),
			  atomic_read(&dest->refcnt),
			  dest->vfwmark,
			  NIPQUAD(dest->vaddr), ntohs(dest->vport));
		__ip_vs_update_dest(svc, dest, udest);

		/*
		 * Get the destination from the trash
		 */
		list_del(&dest->n_list);

		ip_vs_new_estimator(&dest->stats);

		write_lock_bh(&__ip_vs_svc_lock);

		/*
		 * Wait until all other svc users go away.
		 */
		IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

		list_add(&dest->n_list, &svc->destinations);
		svc->num_dests++;

		/* call the update_service function of its scheduler */
		svc->scheduler->update_service(svc);

		write_unlock_bh(&__ip_vs_svc_lock);
		return 0;
	}

	/*
	 * Allocate and initialize the dest structure
	 */
	ret = ip_vs_new_dest(svc, udest, &dest);
	if (ret) {
		return ret;
	}

	/*
	 * Add the dest entry into the list
	 */
	atomic_inc(&dest->refcnt);

	write_lock_bh(&__ip_vs_svc_lock);

	/*
	 * Wait until all other svc users go away.
	 */
	IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

	list_add(&dest->n_list, &svc->destinations);
	svc->num_dests++;

	/* call the update_service function of its scheduler */
	svc->scheduler->update_service(svc);

	write_unlock_bh(&__ip_vs_svc_lock);

	LeaveFunction(2);
	return 0;
}


/*
 * Edit a destination in the given service
 */
static int
ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
{
	struct ip_vs_dest *dest;
	__u32 daddr = udest->addr;
	__u16 dport = udest->port;

	EnterFunction(2);

	if (udest->weight < 0) {
		IP_VS_ERR("ip_vs_edit_dest(): server weight less than zero\n");
		return -ERANGE;
	}

	if (udest->l_threshold > udest->u_threshold) {
		IP_VS_ERR("ip_vs_edit_dest(): lower threshold is higher than "
			  "upper threshold\n");
		return -ERANGE;
	}

	/*
	 * Lookup the destination list
	 */
	dest = ip_vs_lookup_dest(svc, daddr, dport);
	if (dest == NULL) {
		IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n");
		return -ENOENT;
	}

	__ip_vs_update_dest(svc, dest, udest);

	write_lock_bh(&__ip_vs_svc_lock);

	/* Wait until all other svc users go away */
	while (atomic_read(&svc->usecnt) > 1) {};

	/* call the update_service, because server weight may be changed */
	svc->scheduler->update_service(svc);

	write_unlock_bh(&__ip_vs_svc_lock);

	LeaveFunction(2);
	return 0;
}


/*
 * Delete a destination (must be already unlinked from the service)
 */
static void __ip_vs_del_dest(struct ip_vs_dest *dest)
{
	ip_vs_kill_estimator(&dest->stats);

	/*
	 * Remove it from the d-linked list with the real services.
	 */
	write_lock_bh(&__ip_vs_rs_lock);
	ip_vs_rs_unhash(dest);
	write_unlock_bh(&__ip_vs_rs_lock);

	/*
	 * Decrease the refcnt of the dest, and free the dest
	 * if nobody refers to it (refcnt=0). Otherwise, throw
	 * the destination into the trash.
	 */
	if (atomic_dec_and_test(&dest->refcnt)) {
		ip_vs_dst_reset(dest);
		/* simply decrease svc->refcnt here, let the caller check
		   and release the service if nobody refers to it.
		   Only user context can release destination and service,
		   and only one user context can update virtual service at a
		   time, so the operation here is OK */
		atomic_dec(&dest->svc->refcnt);
		kfree(dest);
	} else {
		IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, "
			  "refcnt=%d\n",
			  NIPQUAD(dest->addr), ntohs(dest->port),
			  atomic_read(&dest->refcnt));
		list_add(&dest->n_list, &ip_vs_dest_trash);
		atomic_inc(&dest->refcnt);
	}
}


/*
 * Unlink a destination from the given service
 */
static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
				struct ip_vs_dest *dest, int svcupd)
{
	dest->flags &= ~IP_VS_DEST_F_AVAILABLE;

	/*
	 * Remove it from the d-linked destination list.
	 */
	list_del(&dest->n_list);
	svc->num_dests--;
	if (svcupd) {
		/*
		 * Call the update_service function of its scheduler
		 */
		svc->scheduler->update_service(svc);
	}
}


/*
 * Delete a destination server in the given service
 */
static int
ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
{
	struct ip_vs_dest *dest;
	__u32 daddr = udest->addr;
	__u16 dport = udest->port;

	EnterFunction(2);

	dest = ip_vs_lookup_dest(svc, daddr, dport);
	if (dest == NULL) {
		IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n");
		return -ENOENT;
	}

	write_lock_bh(&__ip_vs_svc_lock);

	/*
	 * Wait until all other svc users go away.
	 */
	IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

	/*
	 * Unlink dest from the service
	 */
	__ip_vs_unlink_dest(svc, dest, 1);

	write_unlock_bh(&__ip_vs_svc_lock);

	/*
	 * Delete the destination
	 */
	__ip_vs_del_dest(dest);

	LeaveFunction(2);
	return 0;
}


/*
 * Add a service into the service hash table
 */
static int
ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
{
	int ret = 0;
	struct ip_vs_scheduler *sched = NULL;
	struct ip_vs_service *svc = NULL;

	/* increase the module use count */
	ip_vs_use_count_inc();

	/* Lookup the scheduler by 'u->sched_name' */
	sched = ip_vs_scheduler_get(u->sched_name);
	if (sched == NULL) {
		IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
			   u->sched_name);
		ret = -ENOENT;
		goto out_mod_dec;
	}

	svc = (struct ip_vs_service *)
		kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
	if (svc == NULL) {
		IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
		ret = -ENOMEM;
		goto out_err;
	}
	memset(svc, 0, sizeof(struct ip_vs_service));

	/* I'm the first user of the service */
	atomic_set(&svc->usecnt, 1);
	atomic_set(&svc->refcnt, 0);

	svc->protocol = u->protocol;
	svc->addr = u->addr;
	svc->port = u->port;
	svc->fwmark = u->fwmark;
	svc->flags = u->flags;
	svc->timeout = u->timeout * HZ;
	svc->netmask = u->netmask;

	INIT_LIST_HEAD(&svc->destinations);
	rwlock_init(&svc->sched_lock);
	spin_lock_init(&svc->stats.lock);

	/* Bind the scheduler */
	ret = ip_vs_bind_scheduler(svc, sched);
	if (ret)
		goto out_err;
	sched = NULL;

	/* Update the virtual service counters */
	if (svc->port == FTPPORT)
		atomic_inc(&ip_vs_ftpsvc_counter);
	else if (svc->port == 0)
		atomic_inc(&ip_vs_nullsvc_counter);

	ip_vs_new_estimator(&svc->stats);
	ip_vs_num_services++;

	/* Hash the service into the service table */
	write_lock_bh(&__ip_vs_svc_lock);
	ip_vs_svc_hash(svc);
	write_unlock_bh(&__ip_vs_svc_lock);

	*svc_p = svc;
	return 0;

  out_err:
	if (svc != NULL) {
		if (svc->scheduler)
			ip_vs_unbind_scheduler(svc);
		if (svc->inc) {
			local_bh_disable();
			ip_vs_app_inc_put(svc->inc);
			local_bh_enable();
		}
		kfree(svc);
	}
	ip_vs_scheduler_put(sched);

  out_mod_dec:
	/* decrease the module use count */
	ip_vs_use_count_dec();

	return ret;
}


/*
 * Edit a service and bind it with a new scheduler
 */
static int
ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
{
	struct ip_vs_scheduler *sched, *old_sched;
	int ret = 0;

	/*
	 * Lookup the scheduler, by 'u->sched_name'
	 */
	sched = ip_vs_scheduler_get(u->sched_name);
	if (sched == NULL) {
		IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
			   u->sched_name);
		return -ENOENT;
	}
	old_sched = sched;

	write_lock_bh(&__ip_vs_svc_lock);

	/*
	 * Wait until all other svc users go away.
	 */
	IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);

	/*
	 * Set the flags and timeout value
	 */
	svc->flags = u->flags | IP_VS_SVC_F_HASHED;
	svc->timeout = u->timeout * HZ;
	svc->netmask = u->netmask;

	old_sched = svc->scheduler;
	if (sched != old_sched) {
		/*
		 * Unbind the old scheduler
		 */
		if ((ret = ip_vs_unbind_scheduler(svc))) {
			old_sched = sched;
			goto out;
		}

		/*
		 * Bind the new scheduler
		 */
		if ((ret = ip_vs_bind_scheduler(svc, sched))) {
			/*
			 * If ip_vs_bind_scheduler fails, restore the old
			 * scheduler.
			 * The main reason of failure is out of memory.
			 *
			 * The question is if the old scheduler can be
			 * restored all the time. TODO: if it cannot be
			 * restored some time, we must delete the service,
			 * otherwise the system may crash.
			 */
			ip_vs_bind_scheduler(svc, old_sched);
			old_sched = sched;
			goto out;
		}
	}

  out:
	write_unlock_bh(&__ip_vs_svc_lock);

	if (old_sched)
		ip_vs_scheduler_put(old_sched);

	return ret;
}


/*
 * Delete a service from the service list
 * - The service must be unlinked, unlocked and not referenced!
 * - We are called under _bh lock
 */
static void __ip_vs_del_service(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *nxt;
	struct ip_vs_scheduler *old_sched;
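
A note on the IP_VS_WAIT_WHILE() calls used throughout this excerpt: the macro is defined near the top of ip_vs_ctl.c, outside the portion shown here. In 2.6-era kernels it is essentially a busy-wait, roughly the sketch below (a hedged reconstruction; the exact spelling may vary between kernel versions):

/*
 * Sketch of the synchronization helper assumed above -- not part of this
 * excerpt: spin until the expression becomes false, calling cpu_relax()
 * on each iteration to keep the busy-wait CPU-friendly.
 */
#define IP_VS_WAIT_WHILE(expr)	while ((expr)) { cpu_relax(); }

The update paths above first take __ip_vs_svc_lock for writing, so no new user can look the service up, and then wait until svc->usecnt drops back to 1 (the updater's own reference) before touching the destination list or rebinding the scheduler.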