[Pacemaker] [PATCH] Bug 2567 - crm resource migrate should support an optional "role" parameter
Dejan Muhamedagic
dejanmm at fastmail.fm
Fri Mar 18 13:24:56 UTC 2011
Hi,
On Fri, Mar 18, 2011 at 12:21:40PM +0100, Holger Teutsch wrote:
> Hi,
> I would like to submit two patches with an initial implementation for
> discussion.
>
> Patch 1 implements migration of the Master role of an m/s resource to
> another node in crm_resource
> Patch 2 adds the corresponding support to the shell.
Great.
> crm_resource does this with the options
>
> "crm_resource --move --resource ms_test --master --node devel2"
>
> The shell does the same with
>
> "crm resource migrate ms_test:master devel2"
>
> crm_resource insists on the options "--master --node xxx" when dealing
> with m/s resources.
>
> It is not easy to pin down what a move command should be expected to
> do for something more complex than a group.
>
> To recall:
>
> crm_resource --move resource
> creates a "standby" rule that moves the resource off the currently
> active node
>
> while
>
> crm_resource --move resource --node newnode
> creates a "prefer" rule that moves the resource to the new node.
>
> When dealing with clones and masters the behavior was arbitrary, as the
> code only considered the node where the first instance of the clone was
> started.
>
> The new code behaves consistently for the master role of an m/s
> resource. The options "--master" and "rsc:master" are somewhat redundant
> as a "slave" move is not supported. Currently they serve more as an
> explicit acknowledgement from the user.
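>
> Concretely, all the patch changes in the generated constraint is that
> the rule additionally carries the role attribute, roughly:
>
>   <rule id="cli-prefer-rule-ms_test" role="Master" score="INFINITY" boolean-op="and">
>     <expression attribute="#uname" operation="eq" value="devel2"/>
>   </rule>
>
> so the rule only applies to the instance running in the Master role.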
>
> On the other hand it is desirable (and was requested several times on
> the ML) to stop a single resource instance of a clone or master on a
> specific node.
>
> Should that be implemented by something like
>
> "crm_resource --move-off --resource myresource --node devel2" ?
>
> or should
>
> crm_resource refuse to work on clones
>
> and/or should moving the master role be the default for m/s resources
> and the "--master" option discarded ?
I think that we also need to consider the case when clone-max is
less than the number of nodes, if I understood correctly what you
were saying. So moving a slave, moving a master, and moving a clone
should all be possible.
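For example (names made up), with

  primitive p ocf:pacemaker:Dummy
  clone c p meta clone-max=2

on a three-node cluster there is always one node without an instance,
so "move the clone" to that node is a perfectly sensible request even
without specifying which instance should move off.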
Cheers,
Dejan
> Regards
> Holger
> # HG changeset patch
> # User Holger Teutsch <holger.teutsch at web.de>
> # Date 1300439791 -3600
> # Branch mig
> # Node ID dac1a4eae844f0bd857951b1154a171c80c25772
> # Parent b4f456380f60bd308acdc462215620f5bf530854
> crm_resource.c: Add support for move of Master role of a m/s resource
>
> diff -r b4f456380f60 -r dac1a4eae844 tools/crm_resource.c
> --- a/tools/crm_resource.c Thu Mar 17 09:41:25 2011 +0100
> +++ b/tools/crm_resource.c Fri Mar 18 10:16:31 2011 +0100
> @@ -52,6 +52,7 @@
> const char *prop_id = NULL;
> const char *prop_set = NULL;
> char *move_lifetime = NULL;
> +int move_master = 0;
> char rsc_cmd = 'L';
> char *our_pid = NULL;
> IPC_Channel *crmd_channel = NULL;
> @@ -192,6 +193,32 @@
> return 0;
> }
>
> +/* is m/s resource in master role on a host? */
> +static int
> +is_master_on(resource_t *rsc, const char *check_uname)
> +{
> + GListPtr lpc = NULL;
> +
> + if(rsc->variant > pe_native) {
> + /* recursively call down */
> + GListPtr gIter = rsc->children;
> + for(; gIter != NULL; gIter = gIter->next) {
> + if(is_master_on(gIter->data, check_uname))
> + return 1;
> + }
> + return 0;
> + }
> +
> + for(lpc = rsc->running_on; lpc != NULL; lpc = lpc->next) {
> + node_t *node = (node_t*)lpc->data;
> + if(rsc->variant == pe_native && rsc->role == RSC_ROLE_MASTER
> + && safe_str_eq(node->details->uname, check_uname)) {
> + return 1;
> + }
> + }
> + return 0;
> +}
> +
> #define cons_string(x) x?x:"NA"
> static void
> print_cts_constraints(pe_working_set_t *data_set)
> @@ -797,6 +824,7 @@
> static int
> move_resource(
> const char *rsc_id,
> + int move_master,
> const char *existing_node, const char *preferred_node,
> cib_t * cib_conn)
> {
> @@ -935,6 +963,10 @@
> crm_xml_add(rule, XML_ATTR_ID, id);
> crm_free(id);
>
> + if(move_master) {
> + crm_xml_add(rule, XML_RULE_ATTR_ROLE, "Master");
> + }
> +
> crm_xml_add(rule, XML_RULE_ATTR_SCORE, INFINITY_S);
> crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and");
>
> @@ -1093,6 +1125,8 @@
> crm_free(prefix);
> }
>
> +/* out of single letter options */
> +#define OPT_MASTER (256 + 'm')
> static struct crm_option long_options[] = {
> /* Top-level Options */
> {"help", 0, 0, '?', "\t\tThis text"},
> @@ -1120,10 +1154,10 @@
> {"get-property", 1, 0, 'G', "Display the 'class', 'type' or 'provider' of a resource", 1},
> {"set-property", 1, 0, 'S', "(Advanced) Set the class, type or provider of a resource", 1},
> {"move", 0, 0, 'M',
> - "\t\tMove a resource from its current location, optionally specifying a destination (-N) and/or a period for which it should take effect (-u)"
> + "\t\tMove a resource from its current location, optionally specifying a role (--master), a destination (-N) and/or a period for which it should take effect (-u)"
> "\n\t\t\t\tIf -N is not specified, the cluster will force the resource to move by creating a rule for the current location and a score of -INFINITY"
> "\n\t\t\t\tNOTE: This will prevent the resource from running on this node until the constraint is removed with -U"},
> - {"un-move", 0, 0, 'U', "\tRemove all constraints created by a move command"},
> + {"un-move", 0, 0, 'U', "\t\tRemove all constraints created by a move command"},
>
> {"-spacer-", 1, 0, '-', "\nAdvanced Commands:"},
> {"delete", 0, 0, 'D', "\t\tDelete a resource from the CIB"},
> @@ -1137,6 +1171,7 @@
> {"resource-type", 1, 0, 't', "Resource type (primitive, clone, group, ...)"},
> {"parameter-value", 1, 0, 'v', "Value to use with -p, -g or -d"},
> {"lifetime", 1, 0, 'u', "\tLifespan of migration constraints\n"},
> + {"master", 0, 0, OPT_MASTER, "\t\tMaster role for migration constraints\n"},
> {"meta", 0, 0, 'm', "\t\tModify a resource's configuration option rather than one which is passed to the resource agent script. For use with -p, -g, -d"},
> {"utilization", 0, 0, 'z', "\tModify a resource's utilization attribute. For use with -p, -g, -d"},
> {"set-name", 1, 0, 's', "\t(Advanced) ID of the instance_attributes object to change"},
> @@ -1162,6 +1197,7 @@
> {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --move", pcmk_option_example},
> {"-spacer-", 1, 0, '-', "Move 'myResource' to a specific machine:", pcmk_option_paragraph},
> {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example},
> + {"-spacer-", 1, 0, '-', " crm_resource --resource myMsResource --master --move --node altNode", pcmk_option_example},
> {"-spacer-", 1, 0, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph},
> {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --un-move", pcmk_option_example},
> {"-spacer-", 1, 0, '-', "Tell the cluster that 'myResource' failed:", pcmk_option_paragraph},
> @@ -1269,7 +1305,10 @@
> case 'A':
> case 'a':
> rsc_cmd = flag;
> - break;
> + break;
> + case OPT_MASTER:
> + move_master = 1;
> + break;
> case 'p':
> case 'g':
> case 'd':
> @@ -1482,7 +1521,7 @@
> rc = cib_NOTEXISTS;
> goto bail;
> }
> - rc = move_resource(rsc_id, NULL, NULL, cib_conn);
> + rc = move_resource(rsc_id, 0, NULL, NULL, cib_conn);
>
> } else if(rsc_cmd == 'M') {
> node_t *dest = NULL;
> @@ -1514,21 +1553,38 @@
> "%s is not a known node\n", host_uname);
> rc = cib_NOTEXISTS;
>
> + } else if(host_uname != NULL
> + && rsc->variant == pe_master && ! move_master) {
> + CMD_ERR("Error performing operation: "
> + "must specify --master for move of m/s resource %s to %s\n",
> + rsc_id, host_uname);
> +
> + } else if(host_uname != NULL
> + && rsc->variant == pe_master && move_master && is_master_on(rsc, host_uname)) {
> + CMD_ERR("Error performing operation: "
> + "%s is already active on %s with role Master\n",
> + rsc_id, host_uname);
> +
> } else if(host_uname != NULL
> + && rsc->variant != pe_master
> && safe_str_eq(current_uname, host_uname)) {
> CMD_ERR("Error performing operation: "
> "%s is already active on %s\n",
> rsc_id, host_uname);
>
> + } else if(host_uname == NULL && rsc->variant == pe_master) {
> + CMD_ERR("Error performing operation: "
> + "must specify a host name (-N) for move of m/s resource %s\n",
> + rsc_id);
> +
> } else if(current_uname != NULL
> && (do_force || host_uname == NULL)) {
> - rc = move_resource(rsc_id, current_uname,
> + rc = move_resource(rsc_id, move_master, current_uname,
> host_uname, cib_conn);
>
> -
> } else if(host_uname != NULL) {
> rc = move_resource(
> - rsc_id, NULL, host_uname, cib_conn);
> + rsc_id, move_master, NULL, host_uname, cib_conn);
>
> } else {
> CMD_ERR("Resource %s not moved: "
> @@ -1536,7 +1592,7 @@
> " specified.\n", rsc_id);
> rc = cib_missing;
> }
> -
> +
> } else if(rsc_cmd == 'G') {
> if(rsc_id == NULL) {
> CMD_ERR("Must supply a resource id with -r\n");
> # HG changeset patch
> # User Holger Teutsch <holger.teutsch at web.de>
> # Date 1300444053 -3600
> # Branch mig
> # Node ID ce9f161ae81858c0f30dea9e8a95dd118536a5aa
> # Parent dac1a4eae844f0bd857951b1154a171c80c25772
> ui.py.in : Add support for move of Master role of a m/s resource
>
> diff -r dac1a4eae844 -r ce9f161ae818 shell/modules/ui.py.in
> --- a/shell/modules/ui.py.in Fri Mar 18 10:16:31 2011 +0100
> +++ b/shell/modules/ui.py.in Fri Mar 18 11:27:33 2011 +0100
> @@ -847,8 +847,16 @@
> return False
> return set_deep_meta_attr("is-managed","false",rsc)
> def migrate(self,cmd,*args):
> - """usage: migrate <rsc> [<node>] [<lifetime>] [force]"""
> - rsc = args[0]
> + """usage: migrate <rsc>[:master] [<node>] [<lifetime>] [force]"""
> + elem = args[0].split(':')
> + rsc = elem[0]
> + master = False
> + if len(elem) > 1:
> + master = elem[1]
> + if master != "master":
> + common_error("%s is invalid, specify 'master'" % master)
> + return False
> + master = True
> if not is_name_sane(rsc):
> return False
> node = None
> @@ -888,6 +896,8 @@
> opts = "%s --lifetime='%s'" % (opts,lifetime)
> if force or user_prefs.get_force():
> opts = "%s --force" % opts
> + if master:
> + opts = "%s --master" % opts
> return ext_cmd(self.rsc_migrate % (rsc,opts)) == 0
> def unmigrate(self,cmd,rsc):
> "usage: unmigrate <rsc>"
> _______________________________________________
> Pacemaker mailing list: Pacemaker at oss.clusterlabs.org
> http://oss.clusterlabs.org/mailman/listinfo/pacemaker
>
> Project Home: http://www.clusterlabs.org
> Getting started: http://www.clusterlabs.org/doc/Cluster_from_Scratch.pdf
> Bugs: http://developerbugs.linux-foundation.org/enter_bug.cgi?product=Pacemaker