223551095_1163 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
  Set<String> configTypesUpdated = new HashSet<>();
  Configuration clusterConfig = clusterTopology.getConfiguration();

  doRecommendConfigurations(clusterConfig, configTypesUpdated);

  // Filter out any properties that should not be included, based on the dependencies
  // specified in the stacks and the filters defined in this class.
  doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);

  Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
  if (!propertiesMoved.isEmpty()) {
    configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
    configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
  }

  // This must be called after doFilterPriorToClusterUpdate() to ensure that the returned
  // set of properties (a copy) doesn't include the removed properties. If an updater
  // removes a property other than the one it is registered for, we will have an issue,
  // because the property won't be removed from the clusterProps map, which is a copy.
  Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();

  doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);

  // TODO: lots of hard-coded HA rules are included here.
  if (clusterTopology.isNameNodeHAEnabled()) {
    doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
  }

  // Explicitly set any properties that are required but not currently provided in the stack definition.
  setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
  setRetryConfiguration(clusterConfig, configTypesUpdated);
  setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
  addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());

  trimProperties(clusterConfig, clusterTopology);

  return configTypesUpdated;
}
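The comment about getFullProperties() is the one subtle ordering constraint in this method: the returned map is a copy, so any property removed from the live Configuration after that snapshot is taken will still appear in clusterProps. Below is a minimal, self-contained sketch of that pitfall; SnapshotPitfallDemo and its maps are illustrative names, not Ambari APIs.

import java.util.HashMap;
import java.util.Map;

// Hypothetical demo class, not part of Ambari. It mimics the copy
// semantics that the comment in doUpdateForClusterCreate() warns about
// for Configuration.getFullProperties().
public class SnapshotPitfallDemo {
  public static void main(String[] args) {
    // Live property map: config type -> (property name -> value).
    Map<String, Map<String, String>> live = new HashMap<>();
    live.put("hdfs-site", new HashMap<>());
    live.get("hdfs-site").put("dfs.nameservices", "ns1");

    // Per-type deep copy, analogous to a getFullProperties()-style snapshot.
    Map<String, Map<String, String>> snapshot = new HashMap<>();
    for (Map.Entry<String, Map<String, String>> e : live.entrySet()) {
      snapshot.put(e.getKey(), new HashMap<>(e.getValue()));
    }

    // Remove a property from the live configuration after the snapshot...
    live.get("hdfs-site").remove("dfs.nameservices");

    // ...and the snapshot still contains it: prints "true".
    System.out.println(snapshot.get("hdfs-site").containsKey("dfs.nameservices"));
  }
}

This is why the snapshot is taken only after doFilterPriorToClusterUpdate() has finished removing properties: filtering first keeps the copy consistent with the live configuration at the moment the updaters start reading it.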
223551095_1164 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1165 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1166 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1167 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1168 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1169 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1170 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1171 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1172 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1173 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1174 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1175 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1176 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1177 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1178 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1179 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1180 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1181 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1182 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1183 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1184 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1185 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1186 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1187 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1188 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1189 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1190 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1205 | protected void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
throws ConfigurationTopologyException {
ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
Stack stack = clusterTopology.getBlueprint().getStack();
String stackName = stack.getName();
String stackVersion = stack.getVersion();
StackId stackId = new StackId(stackName, stackVersion);
Set<String> properties = Sets.newHashSet(
ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_PACKAGES_PROPERTY);
try {
Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
      Map<String, String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);
      for (String property : properties) {
if (clusterEnvDefaultProperties.containsKey(property)) {
String newValue = clusterEnvDefaultProperties.get(property);
String previous = configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property, newValue);
if (!Objects.equals(
trimValue(previous, stack, CLUSTER_ENV_CONFIG_TYPE_NAME, property),
trimValue(newValue, stack, CLUSTER_ENV_CONFIG_TYPE_NAME, property))) {
// in case a property is updated make sure to include cluster-env as being updated
configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
}
}
}
    } catch (AmbariException ambariException) {
throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
ambariException);
}
} |
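The trim-then-compare guard above only marks cluster-env as updated when a stack default actually changes the effective value of a property. A hedged sketch of that pattern follows, assuming a plain whitespace trim; Ambari's trimValue consults per-property trimming metadata from the stack, so the real comparison may differ.

import java.util.Objects;

public class TrimCompareSketch {

  // Assumed trim rule: plain whitespace trimming. Illustrative only.
  static String trim(String value) {
    return value == null ? null : value.trim();
  }

  // The config type counts as updated only if the trimmed values differ.
  static boolean changed(String previous, String newValue) {
    return !Objects.equals(trim(previous), trim(newValue));
  }

  public static void main(String[] args) {
    System.out.println(changed("  HDP  ", "HDP")); // false: equal after trimming
    System.out.println(changed(null, "HDP"));      // true: property newly set
  }
}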
223551095_1208 | static String[] parseNameServices(Map<String, String> properties) {
String nameServices = properties.get("dfs.internal.nameservices");
if (nameServices == null) {
nameServices = properties.get("dfs.nameservices");
}
return splitAndTrimStrings(nameServices);
} |
223551095_1209 | static String[] parseNameNodes(String nameService, Map<String, String> properties) {
final String nameNodes = properties.get("dfs.ha.namenodes." + nameService);
return splitAndTrimStrings(nameNodes);
} |
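A small usage sketch of the two parsers above. parseNameServices and parseNameNodes are copied from the records; splitAndTrimStrings is an assumed helper (comma-split plus trim, empty array for null input), since its definition is not shown here.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class NameServiceParseSketch {

  // Assumed helper: comma-split and trim each token; empty array for null input.
  static String[] splitAndTrimStrings(String csv) {
    if (csv == null) {
      return new String[0];
    }
    return Arrays.stream(csv.split(",")).map(String::trim).toArray(String[]::new);
  }

  // Copied from the record above: prefer dfs.internal.nameservices,
  // fall back to dfs.nameservices.
  static String[] parseNameServices(Map<String, String> properties) {
    String nameServices = properties.get("dfs.internal.nameservices");
    if (nameServices == null) {
      nameServices = properties.get("dfs.nameservices");
    }
    return splitAndTrimStrings(nameServices);
  }

  // Copied from the record above: the NameNode IDs for one nameservice.
  static String[] parseNameNodes(String nameService, Map<String, String> properties) {
    return splitAndTrimStrings(properties.get("dfs.ha.namenodes." + nameService));
  }

  public static void main(String[] args) {
    Map<String, String> hdfsSite = new HashMap<>();
    hdfsSite.put("dfs.nameservices", "mycluster");
    hdfsSite.put("dfs.ha.namenodes.mycluster", "nn1, nn2");
    System.out.println(Arrays.toString(parseNameServices(hdfsSite)));      // [mycluster]
    System.out.println(Arrays.toString(parseNameNodes("mycluster", hdfsSite))); // [nn1, nn2]
  }
}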
223551095_1213 | public Set<String> getRequiredHostGroups() {
Set<String> requiredHostGroups = new HashSet<>();
Collection<Map<String, Map<String, PropertyUpdater>>> updaters = createCollectionOfUpdaters();
    // Iterate over all registered updaters and collect the host groups referenced by the
    // properties they handle
for (Map<String, Map<String, PropertyUpdater>> updaterMap : updaters) {
for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
String type = entry.getKey();
for (Map.Entry<String, PropertyUpdater> updaterEntry : entry.getValue().entrySet()) {
String propertyName = updaterEntry.getKey();
PropertyUpdater updater = updaterEntry.getValue();
// cluster scoped configuration which also includes all default and BP properties
Map<String, Map<String, String>> clusterProps = clusterTopology.getConfiguration().getFullProperties();
Map<String, String> typeMap = clusterProps.get(type);
if (typeMap != null && typeMap.containsKey(propertyName) && typeMap.get(propertyName) != null) {
requiredHostGroups.addAll(updater.getRequiredHostGroups(
propertyName, typeMap.get(propertyName), clusterProps, clusterTopology));
}
// host group configs
for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
Map<String, Map<String, String>> hgConfigProps = groupInfo.getConfiguration().getProperties();
Map<String, String> hgTypeMap = hgConfigProps.get(type);
if (hgTypeMap != null && hgTypeMap.containsKey(propertyName)) {
requiredHostGroups.addAll(updater.getRequiredHostGroups(
propertyName, hgTypeMap.get(propertyName), hgConfigProps, clusterTopology));
}
}
}
}
}
    // Iterate through all user defined properties (blueprint + cluster template only, no stack defaults) that do not
    // have a registered updater. These properties can also reference host groups, which the default
    // updater should extract
Set<Pair<String, String>> propertiesWithUpdaters = getAllPropertiesWithUpdaters(updaters);
// apply default updater on cluster config
Map<String, Map<String, String>> userDefinedClusterProperties = clusterTopology.getConfiguration().getFullProperties(1);
addRequiredHostgroupsByDefaultUpdater(userDefinedClusterProperties, propertiesWithUpdaters, requiredHostGroups);
// apply default updater on hostgroup configs
    clusterTopology.getHostGroupInfo().values().forEach(
hostGroup -> {
Configuration hostGroupConfig = hostGroup.getConfiguration();
Map<String, Map<String, String>> hostGroupConfigProps = hostGroupConfig.getFullProperties(1);
addRequiredHostgroupsByDefaultUpdater(hostGroupConfigProps, propertiesWithUpdaters, requiredHostGroups);
});
return requiredHostGroups;
} |
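The default updater applied at the end of getRequiredHostGroups presumably extracts host-group references embedded in property values. A hedged sketch follows, assuming the %HOSTGROUP::name% token syntax used in Ambari blueprint property values; the real default updater's matching rules may differ.

import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HostGroupTokenSketch {

  // Assumed token syntax: %HOSTGROUP::group_name%
  private static final Pattern HOSTGROUP_TOKEN = Pattern.compile("%HOSTGROUP::([^%]+)%");

  // Collect every host group name referenced by tokens in a property value.
  static Set<String> referencedHostGroups(String propertyValue) {
    Set<String> groups = new HashSet<>();
    Matcher matcher = HOSTGROUP_TOKEN.matcher(propertyValue);
    while (matcher.find()) {
      groups.add(matcher.group(1)); // the group name between :: and %
    }
    return groups;
  }

  public static void main(String[] args) {
    String value = "thrift://%HOSTGROUP::master_1%:9083,thrift://%HOSTGROUP::master_2%:9083";
    System.out.println(referencedHostGroups(value)); // [master_1, master_2]
  }
}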
223551095_1224 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1225 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1226 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1227 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1228 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1229 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1230 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1231 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1232 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1233 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1234 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1235 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1236 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1237 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1238 | public void doUpdateForBlueprintExport() {
    // HA configs are only processed in the cluster configuration, not in host group configurations
    if (clusterTopology.isNameNodeHAEnabled()) {
      doNameNodeHAUpdate();
    }
    if (clusterTopology.isYarnResourceManagerHAEnabled()) {
      doYarnResourceManagerHAUpdate();
    }
    if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) {
      doOozieServerHAUpdate();
    }

    Collection<Configuration> allConfigs = new ArrayList<>();
    allConfigs.add(clusterTopology.getConfiguration());
    for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
      Configuration hgConfiguration = groupInfo.getConfiguration();
      // only consider properties defined at the host group level (and its immediate
      // parent), not values inherited from the full cluster configuration
      if (!hgConfiguration.getFullProperties(1).isEmpty()) {
        // create a new configuration which only contains properties specified in the host group and BP host group
        allConfigs.add(new Configuration(hgConfiguration.getProperties(), null,
            new Configuration(hgConfiguration.getParentConfiguration().getProperties(), null)));
      }
    }

    for (Configuration configuration : allConfigs) {
      doSingleHostExportUpdate(singleHostTopologyUpdaters, configuration);
      doSingleHostExportUpdate(dbHostTopologyUpdaters, configuration);
      doMultiHostExportUpdate(multiHostTopologyUpdaters, configuration);
      doNonTopologyUpdate(nonTopologyUpdaters, configuration);
      doRemovePropertyExport(removePropertyUpdaters, configuration);
      doFilterPriorToExport(configuration);
    }
  } |
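The loop above deliberately rebuilds each host group configuration as a detached two-layer Configuration (host group properties over blueprint host group properties, with no cluster-level parent), so the export only emits values that were actually scoped to the host group. The sketch below models that layering with a minimal parent-chained class; the class name, constructor shape, and the depth semantics of getFullProperties(int) are assumptions for illustration, not the real Ambari types.

import java.util.HashMap;
import java.util.Map;

public class HostGroupExportSketch {

  // Minimal parent-chained configuration; the real Configuration class carries
  // attributes and more behavior, this only models property layering.
  static class LayeredConfig {
    private final Map<String, Map<String, String>> properties;
    private final LayeredConfig parent;

    LayeredConfig(Map<String, Map<String, String>> properties, LayeredConfig parent) {
      this.properties = properties;
      this.parent = parent;
    }

    Map<String, Map<String, String>> getProperties() {
      return properties;
    }

    // Assumed semantics: merge this layer over up to 'depth' ancestor layers,
    // so getFullProperties(1) sees the host group plus one parent layer only.
    Map<String, Map<String, String>> getFullProperties(int depth) {
      Map<String, Map<String, String>> merged = new HashMap<>();
      if (parent != null && depth > 0) {
        for (Map.Entry<String, Map<String, String>> e : parent.getFullProperties(depth - 1).entrySet()) {
          merged.put(e.getKey(), new HashMap<>(e.getValue()));
        }
      }
      for (Map.Entry<String, Map<String, String>> e : properties.entrySet()) {
        merged.computeIfAbsent(e.getKey(), k -> new HashMap<>()).putAll(e.getValue());
      }
      return merged;
    }
  }

  public static void main(String[] args) {
    Map<String, Map<String, String>> clusterProps = new HashMap<>();
    clusterProps.put("core-site", new HashMap<>(Map.of("fs.defaultFS", "hdfs://ns1")));

    Map<String, Map<String, String>> bpHostGroupProps = new HashMap<>();
    Map<String, Map<String, String>> hostGroupProps = new HashMap<>();
    hostGroupProps.put("hdfs-site", new HashMap<>(Map.of("dfs.datanode.data.dir", "/grid/0")));

    LayeredConfig cluster = new LayeredConfig(clusterProps, null);
    LayeredConfig bpHostGroup = new LayeredConfig(bpHostGroupProps, cluster);
    LayeredConfig hostGroup = new LayeredConfig(hostGroupProps, bpHostGroup);

    // Mirror of the export step: detach the two host-group layers from the cluster.
    if (!hostGroup.getFullProperties(1).isEmpty()) {
      LayeredConfig exportView = new LayeredConfig(hostGroup.getProperties(),
          new LayeredConfig(bpHostGroup.getProperties(), null));
      // exportView resolves dfs.datanode.data.dir but not fs.defaultFS: the
      // cluster-level value never leaks into the exported host group.
      System.out.println(exportView.getFullProperties(Integer.MAX_VALUE));
    }
  }
}

Checking only one parent level before adding the host group to allConfigs keeps host groups with purely cluster-scoped configuration out of the export, so the emitted blueprint stays minimal.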
223551095_1239 | public void doUpdateForBlueprintExport() {
// HA configs are only processed in cluster configuration, not HG configurations
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdate();
}
if (clusterTopology.isYarnResourceManagerHAEnabled()) {
doYarnResourceManagerHAUpdate();
}
if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) {
doOozieServerHAUpdate();
}
Collection<Configuration> allConfigs = new ArrayList<>();
allConfigs.add(clusterTopology.getConfiguration());
for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
Configuration hgConfiguration = groupInfo.getConfiguration();
if (! hgConfiguration.getFullProperties(1).isEmpty()) {
// create new configuration which only contains properties specified in host group and BP host group
allConfigs.add(new Configuration(hgConfiguration.getProperties(), null,
new Configuration(hgConfiguration.getParentConfiguration().getProperties(), null)));
}
}
for (Configuration configuration : allConfigs) {
doSingleHostExportUpdate(singleHostTopologyUpdaters, configuration);
doSingleHostExportUpdate(dbHostTopologyUpdaters, configuration);
doMultiHostExportUpdate(multiHostTopologyUpdaters, configuration);
doNonTopologyUpdate(nonTopologyUpdaters, configuration);
doRemovePropertyExport(removePropertyUpdaters, configuration);
doFilterPriorToExport(configuration);
}
} |
223551095_1240 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1241 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1242 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1243 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1244 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1245 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1246 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1247 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1248 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1249 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1250 | public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
Set<String> configTypesUpdated = new HashSet<>();
Configuration clusterConfig = clusterTopology.getConfiguration();
doRecommendConfigurations(clusterConfig, configTypesUpdated);
// filter out any properties that should not be included, based on the dependencies
// specified in the stacks, and the filters defined in this class
doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
if (!propertiesMoved.isEmpty()) {
configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
}
// this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
doGeneralPropertyUpdatesForClusterCreate(clusterConfig, clusterProps, configTypesUpdated);
//todo: lots of hard coded HA rules included here
if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdateOnClusterCreation(clusterConfig, clusterProps, configTypesUpdated);
}
// Explicitly set any properties that are required but not currently provided in the stack definition.
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
return configTypesUpdated;
} |
223551095_1253 | public void doUpdateForBlueprintExport() {
  // HA configs are only processed in the cluster configuration, not in host group configurations
  if (clusterTopology.isNameNodeHAEnabled()) {
    doNameNodeHAUpdate();
  }
  if (clusterTopology.isYarnResourceManagerHAEnabled()) {
    doYarnResourceManagerHAUpdate();
  }
  if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) {
    doOozieServerHAUpdate();
  }

  Collection<Configuration> allConfigs = new ArrayList<>();
  allConfigs.add(clusterTopology.getConfiguration());
  for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
    Configuration hgConfiguration = groupInfo.getConfiguration();
    if (!hgConfiguration.getFullProperties(1).isEmpty()) {
      // create a new configuration that contains only the properties specified in
      // the host group and in the corresponding blueprint host group
      allConfigs.add(new Configuration(hgConfiguration.getProperties(), null,
          new Configuration(hgConfiguration.getParentConfiguration().getProperties(), null)));
    }
  }

  for (Configuration configuration : allConfigs) {
    doSingleHostExportUpdate(singleHostTopologyUpdaters, configuration);
    doSingleHostExportUpdate(dbHostTopologyUpdaters, configuration);
    doMultiHostExportUpdate(multiHostTopologyUpdaters, configuration);
    doNonTopologyUpdate(nonTopologyUpdaters, configuration);
    doRemovePropertyExport(removePropertyUpdaters, configuration);
    doFilterPriorToExport(configuration);
  }
} |
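A rough sketch of the layered-configuration idea behind the pruning in doUpdateForBlueprintExport, with a toy class instead of the real Configuration type, and assuming the integer argument of getFullProperties(int) limits how many parent levels are merged in: a child layer resolves against its parent, child values winning, so a two-level view holds exactly the host group and blueprint host group properties and excludes anything above them.

import java.util.HashMap;
import java.util.Map;

// Toy stand-in for a layered configuration; not the Ambari Configuration class.
class LayeredConfig {
  private final Map<String, String> properties;
  private final LayeredConfig parent;

  LayeredConfig(Map<String, String> properties, LayeredConfig parent) {
    this.properties = properties;
    this.parent = parent;
  }

  // Merge this layer with at most 'depth' ancestor layers; child values win.
  Map<String, String> fullProperties(int depth) {
    Map<String, String> merged = (parent != null && depth > 0)
        ? new HashMap<>(parent.fullProperties(depth - 1))
        : new HashMap<>();
    merged.putAll(properties);
    return merged;
  }
}

public class LayeredConfigDemo {
  public static void main(String[] args) {
    LayeredConfig blueprintHostGroup = new LayeredConfig(
        new HashMap<>(Map.of("yarn.nodemanager.address", "0.0.0.0:45454")), null);
    LayeredConfig hostGroup = new LayeredConfig(
        new HashMap<>(Map.of("yarn.log.server.url", "http://c6402.example.com:19888/jobhistory/logs")),
        blueprintHostGroup);

    // One parent level only: host group plus blueprint host group. This mirrors the
    // two-level Configuration the export code builds so that cluster-level defaults
    // never leak into the exported blueprint.
    System.out.println(hostGroup.fullProperties(1));
  }
}

Rebuilding the host-group configuration as a fresh two-level layer, rather than reusing one whose parent chain reaches the cluster configuration, is what keeps the exported blueprint down to the explicitly specified properties.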