Two code-paths set relation data on the HA relation:

1) cluster_connected -> this_charm.configure_ha_resources -> hacluster.bind_resources -> manage_resources(crm)
2) cluster_connected -> hacluster.manage_resources(crm)

I thought that the second call with a custom CRM object (where crm.resources == {}) might be overriding the relation data, but it turns out not to be the case because of the "if v" here:
https://github.com/openstack/charm-interface-hacluster/blob/9ea447c296466ba9fdca1eb8e9752bbd6a75cc59/requires.py#L98

relation_data = {
    'json_{}'.format(k): json.dumps(v, sort_keys=True)
    for k, v in crm.items() if v
}
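To see why, note that the comprehension only emits a json_* key for truthy values, so a CRM whose fields are all empty cannot blank out keys written by an earlier call. A minimal sketch of that behaviour (a plain dict stands in for CRM, and the colocation value is made up):

import json

def to_relation_data(crm):
    # Mirrors the comprehension in manage_resources: falsy values
    # (empty dicts/lists) produce no json_* key at all.
    return {
        'json_{}'.format(k): json.dumps(v, sort_keys=True)
        for k, v in crm.items() if v
    }

crm = {'resources': {}, 'colocations': {'ganesha_with_vip': 'inf: ...'}}
print(to_relation_data(crm))
# {'json_colocations': '{"ganesha_with_vip": "inf: ..."}'}
# 'resources' is empty, so no 'json_resources' key is sent.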
charms_openstack/charm/classes.py

def configure_ha_resources(self, hacluster):
    """Inform the ha subordinate about each service it should manage. The
    child class specifies the services via self.ha_resources

    @param hacluster instance of interface class HAClusterRequires
    """
    RESOURCE_TYPES = {
        'vips': self._add_ha_vips_config,
        'haproxy': self._add_ha_haproxy_config,
        'dnsha': self._add_dnsha_config,
    }
    if self.ha_resources:
        for res_type in self.ha_resources:
            RESOURCE_TYPES[res_type](hacluster)
        hacluster.bind_resources(iface=self.config[IFACE_KEY])  # <-------- this
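For context, a charm class opts into this path by listing the resource types it wants managed; the entries have to match the RESOURCE_TYPES keys above. An illustrative sketch, not code from the ganesha charm:

import charms_openstack.charm

class MyHACharm(charms_openstack.charm.HAOpenStackCharm):
    # Each entry selects a handler from RESOURCE_TYPES in
    # configure_ha_resources; 'vips' is what ends up adding the VIP
    # resource mentioned in the handler below.
    ha_resources = ['vips', 'haproxy']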
requires.py
def manage_resources(self, crm):  # <-------- this
    """
    Request for the hacluster to manage the resources defined in the
    crm object.

        res = CRM()
        res.primitive('res_neutron_haproxy', 'lsb:haproxy',
                      op='monitor interval="5s"')
        res.init_services('haproxy')
        res.clone('cl_nova_haproxy', 'res_neutron_haproxy')
        hacluster.manage_resources(crm)

    :param crm: CRM() instance - Config object for Pacemaker resources
    :returns: None
    """
    relation_data = {
        'json_{}'.format(k): json.dumps(v, sort_keys=True)
        for k, v in crm.items() if v
    }
    if data_changed('hacluster-manage_resources', relation_data):
        self.set_local(**relation_data)
        self.set_remote(**relation_data)
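The data_changed() guard comes from charms.reactive: it records a hash of the value under the given key and only returns True when that hash differs from the previous invocation, so an identical payload is not re-sent. A simplified model of that behaviour (assumption: the real helper persists hashes in the charm's unitdata store, not a module-level dict):

import hashlib
import json

_seen = {}

def data_changed(key, value):
    # Hash the JSON form of the value and compare it with the hash
    # recorded for this key on the previous call.
    digest = hashlib.md5(
        json.dumps(value, sort_keys=True).encode()).hexdigest()
    changed = _seen.get(key) != digest
    _seen[key] = digest
    return changed

print(data_changed('hacluster-manage_resources', {'json_clones': '{}'}))  # True
print(data_changed('hacluster-manage_resources', {'json_clones': '{}'}))  # False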
def bind_resources(self, iface=None, mcastport=None):
    """Inform the ha subordinate about each service it should manage. The
    child class specifies the services via self.ha_resources

    :param iface: string - Network interface to bind to
    :param mcastport: int - Multicast port corosync should use for cluster
                            management traffic
    """
    if mcastport is None:
        mcastport = 4440
    resources_dict = self.get_local('resources')
    self.bind_on(iface=iface, mcastport=mcastport)
    if resources_dict:
        resources = relations.hacluster.common.CRM(**resources_dict)  # <-------- this
        self.manage_resources(resources)  # <-------- this
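CRM subclasses dict, so the copy kept under the local 'resources' key (my assumption: accumulated there by helpers such as add_systemd_service via set_local) can be rebuilt by splatting it back into the constructor, as above. A sketch with a stand-in class and made-up resource names:

class CRM(dict):
    # Stand-in for relations.hacluster.common.CRM; only the dict
    # behaviour used here is modelled.
    pass

resources_dict = {
    'resources': {'res_ganesha_vip': 'ocf:heartbeat:IPaddr2'},
    'groups': {'grp_ganesha_vips': 'res_ganesha_vip'},
}
resources = CRM(**resources_dict)  # full CRM mapping restored
print(resources['groups'])  # {'grp_ganesha_vips': 'res_ganesha_vip'}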
@reactive.when('ha.connected', 'ganesha-pool-configured',
               'config.rendered')
@reactive.when_not('ha.available')
def cluster_connected(hacluster):
    """Configure HA resources in corosync"""
    with charm.provide_charm_instance() as this_charm:
        this_charm.configure_ha_resources(hacluster)  # <-------- this adds a vip resource via charms.openstack
        for service in ['nfs-ganesha', 'manila-share']:
            ch_core.host.service('disable', service)
            ch_core.host.service('stop', service)
        hacluster.add_systemd_service('nfs-ganesha',
                                      'nfs-ganesha',
                                      clone=False)
        hacluster.add_systemd_service('manila-share',
                                      'manila-share',
                                      clone=False)
        # This is a bit of a nasty hack to ensure that we can colocate the
        # services to make manila + ganesha colocate. This can be tidied up
        # once
        # https://bugs.launchpad.net/charm-interface-hacluster/+bug/1880644
        # is resolved
        import hooks.relations.hacluster.common as hacluster_common  # noqa
        crm = hacluster_common.CRM()  # <-------- this creates a new resource dict but it does not get populated from the existing local data.
        crm.colocation('ganesha_with_vip',
                       'inf',
                       'res_nfs_ganesha_nfs_ganesha',
                       'grp_ganesha_vips')
        crm.colocation('manila_with_vip',
                       'inf',
                       'res_manila_share_manila_share',
                       'grp_ganesha_vips')
        hacluster.manage_resources(crm)  # <-------- this adds json_colocations key values
        this_charm.assess_status()
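So the remote end should see the union of the two payloads. A sketch of the expected outcome (key values elided; the exact keys depend on what the VIP path stored):

# Path 1: bind_resources replays the stored CRM, e.g.:
first = {'json_resources': '...', 'json_groups': '...'}
# Path 2: the hand-built CRM only has colocations, so after the
# 'if v' filter it sends a single key:
second = {'json_colocations': '...'}
print({**first, **second})
# Union of both: the second call adds json_colocations without
# blanking anything written by the first.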