223551095_1796
@Override
public Result finalizeResult(Result queryResult) {
  TreeNode<Resource> resultTree = queryResult.getResultTree();
  Result result = new ResultImpl(true);
  TreeNode<Resource> blueprintResultTree = result.getResultTree();
  if (isCollection(resultTree)) {
    blueprintResultTree.setProperty("isCollection", "true");
  }
  for (TreeNode<Resource> node : resultTree.getChildren()) {
    Resource blueprintResource = createBlueprintResource(node);
    blueprintResultTree.addChild(new TreeNodeImpl<>(
        blueprintResultTree, blueprintResource, node.getName()));
  }
  return result;
}
223551095_1800
public Predicate getExtendedPredicate() { return lastVisited; }
223551095_1801
@Override public boolean isCollectionResource() { return getKeyValueMap().get(getResourceDefinition().getType()) == null; }
223551095_1803
@Override public Result execute() throws UnsupportedPropertyException, SystemException, NoSuchResourceException, NoSuchParentResourceException { queryForResources(); return getResult(null); }
223551095_1805
protected Map<Resource, Set<Map<String, Object>>> getJoinedResourceProperties(Set<String> propertyIds,
    Resource parentResource, String category)
    throws SystemException, UnsupportedPropertyException, NoSuchParentResourceException, NoSuchResourceException {

  Map<Resource, Set<Map<String, Object>>> resourcePropertyMaps = new HashMap<>();
  Map<String, String> categoryPropertyIdMap = getPropertyIdsForCategory(propertyIds, category);

  for (Map.Entry<Resource, QueryResult> queryResultEntry : populatedQueryResults.entrySet()) {
    QueryResult queryResult = queryResultEntry.getValue();
    Resource queryParentResource = queryResultEntry.getKey();

    // for each resource for the given parent ...
    if (queryParentResource == parentResource) {
      Iterable<Resource> iterResource = clusterController.getIterable(
          resourceDefinition.getType(), queryResult.getQueryResponse(),
          queryResult.getRequest(), queryResult.getPredicate(), null, null);

      for (Resource resource : iterResource) {
        // get the resource properties
        Map<String, Object> resourcePropertyMap = new HashMap<>();
        for (Map.Entry<String, String> categoryPropertyIdEntry : categoryPropertyIdMap.entrySet()) {
          Object value = resource.getPropertyValue(categoryPropertyIdEntry.getValue());
          if (value != null) {
            resourcePropertyMap.put(categoryPropertyIdEntry.getKey(), value);
          }
        }

        Set<Map<String, Object>> propertyMaps = new HashSet<>();
        // For each sub category get the property maps for the sub resources
        for (Map.Entry<String, QueryImpl> entry : requestedSubResources.entrySet()) {
          String subResourceCategory = category == null ? entry.getKey() : category + "/" + entry.getKey();
          QueryImpl subResource = entry.getValue();
          Map<Resource, Set<Map<String, Object>>> subResourcePropertyMaps =
              subResource.getJoinedResourceProperties(propertyIds, resource, subResourceCategory);

          Set<Map<String, Object>> combinedSubResourcePropertyMaps = new HashSet<>();
          for (Set<Map<String, Object>> maps : subResourcePropertyMaps.values()) {
            combinedSubResourcePropertyMaps.addAll(maps);
          }
          propertyMaps = joinPropertyMaps(propertyMaps, combinedSubResourcePropertyMaps);
        }

        // add parent resource properties to joinedResources
        if (!resourcePropertyMap.isEmpty()) {
          if (propertyMaps.isEmpty()) {
            propertyMaps.add(resourcePropertyMap);
          } else {
            for (Map<String, Object> propertyMap : propertyMaps) {
              propertyMap.putAll(resourcePropertyMap);
            }
          }
        }
        resourcePropertyMaps.put(resource, propertyMaps);
      }
    }
  }
  return resourcePropertyMaps;
}
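The method above leans on joinPropertyMaps, which is defined elsewhere in the class. A minimal self-contained sketch of the presumed semantics follows: a cross join that merges every left map with every right map, with an empty side acting as the identity. The behavior is inferred from the call site, not confirmed by this excerpt.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PropertyMapJoinSketch {

  // Hypothetical reconstruction of joinPropertyMaps, inferred from the call site above:
  // cross-join two sets of property maps, merging each pair; an empty side is the identity.
  static Set<Map<String, Object>> joinPropertyMaps(Set<Map<String, Object>> left,
                                                   Set<Map<String, Object>> right) {
    if (left.isEmpty()) {
      return right;
    }
    if (right.isEmpty()) {
      return left;
    }
    Set<Map<String, Object>> joined = new HashSet<>();
    for (Map<String, Object> l : left) {
      for (Map<String, Object> r : right) {
        Map<String, Object> merged = new HashMap<>(l);
        merged.putAll(r);
        joined.add(merged);
      }
    }
    return joined;
  }

  public static void main(String[] args) {
    Set<Map<String, Object>> hosts = new HashSet<>();
    hosts.add(new HashMap<>(Map.of("host_name", "h1")));

    Set<Map<String, Object>> components = new HashSet<>();
    components.add(new HashMap<>(Map.of("component_name", "DATANODE")));
    components.add(new HashMap<>(Map.of("component_name", "NODEMANAGER")));

    // Prints two merged maps, one per (host, component) pair.
    System.out.println(joinPropertyMaps(hosts, components));
  }
}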
223551095_1809
@Override
public Result handleRequest(Request request) {
  Query query = request.getResource().getQuery();

  query.setPageRequest(request.getPageRequest());
  query.setSortRequest(request.getSortRequest());
  query.setRenderer(request.getRenderer());

  // If the request body exists, copy the requestInfoProperties from it. This map should contain
  // the _directives_ specified in the request.
  RequestBody body = request.getBody();
  if (body != null) {
    query.setRequestInfoProps(body.getRequestInfoProperties());
  }

  try {
    addFieldsToQuery(request, query);
  } catch (IllegalArgumentException e) {
    return new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
  }

  Result result;
  Predicate p = null;
  try {
    p = request.getQueryPredicate();
    query.setUserPredicate(p);

    result = query.execute();
    result.setResultStatus(new ResultStatus(ResultStatus.STATUS.OK));
  } catch (AuthorizationException e) {
    result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.FORBIDDEN, e.getMessage()));
  } catch (SystemException e) {
    result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e));
  } catch (NoSuchParentResourceException e) {
    result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e.getMessage()));
  } catch (UnsupportedPropertyException e) {
    result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
  } catch (NoSuchResourceException e) {
    if (p == null) {
      // no predicate specified; resource requested by id
      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e.getMessage()));
    } else {
      // resource(s) requested using a predicate
      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.OK, e));
      result.getResultTree().setProperty("isCollection", "true");
    }
  } catch (IllegalArgumentException e) {
    result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
        "Invalid Request: " + e.getMessage()));
    LOG.error("Bad request: ", e);
  } catch (RuntimeException e) {
    if (LOG.isErrorEnabled()) {
      LOG.error("Caught a runtime exception executing a query", e);
    }
    //result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e));
    throw e;
  }
  return result;
}
223551095_1817
@Override
public Result handleRequest(Request request) {
  Result queryResult = getReadHandler().handleRequest(request);
  if (queryResult.getStatus().isErrorState()
      || queryResult.getResultTree().getChildren().isEmpty()) {
    // if the query result has an error state or contains no resources, just return it.
    // currently returns 200 for the case where the query returns no rows
    return queryResult;
  }

  Map<Resource.Type, Set<Map<String, Object>>> mapProperties;
  try {
    mapProperties = buildCreateSet(request, queryResult);
  } catch (IllegalArgumentException e) {
    return createInvalidRequestResult(e.getMessage());
  }

  if (mapProperties.size() != 1) {
    return createInvalidRequestResult(mapProperties.size() == 0 ?
        "A minimum of one sub-resource must be specified for creation." :
        "Multiple sub-resource types may not be created in the same request.");
  }

  // only get the first element because we currently only support creation of a single sub-resource type
  final Map.Entry<Resource.Type, Set<Map<String, Object>>> entry = mapProperties.entrySet().iterator().next();

  ResourceInstance createResource = getResourceFactory().createResource(
      entry.getKey(), request.getResource().getKeyValueMap());

  RequestBody requestBody = new RequestBody();
  requestBody.setBody(request.getBody().getBody());
  for (Map<String, Object> map : entry.getValue()) {
    requestBody.addPropertySet(new NamedPropertySet("", map));
  }
  return persist(createResource, requestBody);
}
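createInvalidRequestResult is not shown in this excerpt. Given the ResultImpl/ResultStatus usage in the read handler above, it is presumably a one-line 400 wrapper along these lines; this is an assumption, not the confirmed implementation:

// Presumed helper, mirroring the BAD_REQUEST construction used by the read handler above.
private Result createInvalidRequestResult(String message) {
  return new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, message));
}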
223551095_1827
@Override public Predicate toPredicate(String prop, String val) { return new LessEqualsPredicate<>(prop, val); }
223551095_1830
@Override public Predicate toPredicate(String prop, String val) { return new GreaterPredicate<>(prop, val); }
223551095_1836
@Override public Predicate toPredicate(String prop, String val) { return new GreaterEqualsPredicate<>(prop, val); }
223551095_1839
@Override public Predicate toPredicate(Predicate left, Predicate right) { //todo: refactor to not need down casts return new AndPredicate(left, right); }
223551095_1843
@Override public Predicate toPredicate(Predicate left, Predicate right) { //todo: refactor to remove down casts return new OrPredicate(left, right); }
223551095_1847
@Override public Predicate toPredicate(String prop, String val) { return new NotPredicate(new EqualsPredicate<>(prop, val)); }
223551095_1850
@Override public Predicate toPredicate(String prop, String val) { return new EqualsPredicate<>(prop, val); }
223551095_1853
@Override
public Predicate toPredicate(String prop, String val) throws InvalidQueryException {
  if (val == null) {
    throw new InvalidQueryException("IN operator is missing a required right operand for property " + prop);
  }

  String[] tokens = val.split(",");
  List<EqualsPredicate> listPredicates = new ArrayList<>();
  for (String token : tokens) {
    listPredicates.add(new EqualsPredicate<>(prop, token.trim()));
  }
  return listPredicates.size() == 1 ? listPredicates.get(0) : buildOrPredicate(listPredicates);
}
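buildOrPredicate is also defined elsewhere; presumably it folds the list of equality checks into a single OR. A self-contained sketch with stand-in types follows; the real Predicate/OrPredicate/EqualsPredicate classes in this codebase have richer contracts.

import java.util.List;

public class InOperatorSketch {

  // Stand-in types for illustration only; the project's own predicate classes differ.
  interface Predicate { }

  static final class EqualsPredicate implements Predicate {
    final String prop, val;
    EqualsPredicate(String prop, String val) { this.prop = prop; this.val = val; }
    @Override public String toString() { return prop + "=" + val; }
  }

  static final class OrPredicate implements Predicate {
    final List<? extends Predicate> predicates;
    OrPredicate(List<? extends Predicate> predicates) { this.predicates = predicates; }
    @Override public String toString() { return "OR" + predicates; }
  }

  // Presumed behavior of buildOrPredicate: wrap all the EqualsPredicates in one OR.
  static Predicate buildOrPredicate(List<EqualsPredicate> predicates) {
    return new OrPredicate(predicates);
  }

  public static void main(String[] args) {
    // "Hosts/host_name.in(h1,h2)" would yield two EqualsPredicates OR-ed together.
    System.out.println(buildOrPredicate(List.of(
        new EqualsPredicate("Hosts/host_name", "h1"),
        new EqualsPredicate("Hosts/host_name", "h2"))));
  }
}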
223551095_1856
@Override public Predicate toPredicate(String prop, String val) { return new LessPredicate<>(prop, val); }
223551095_1859
@Override public Predicate toPredicate(Predicate left, Predicate right) { return new NotPredicate(right); }
223551095_1862
public Predicate parse(Token[] tokens) throws InvalidQueryException {
  ParseContext ctx = parseExpressions(tokens);

  List<Expression> listExpressions = ctx.getExpressions();
  changeHostNameToLowerCase(listExpressions);
  List<Expression> listMergedExpressions = mergeExpressions(listExpressions, ctx.getMaxPrecedence());

  return listMergedExpressions.isEmpty() ? null : listMergedExpressions.get(0).toPredicate();
}
223551095_1879
public Token[] tokens(String exp) throws InvalidQueryException { return tokens(exp, Collections.emptySet()); }
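Taken together with parse(Token[]) above, the intended round trip looks roughly like the following. The QueryLexer/QueryParser class names are assumptions based on where these methods appear to live, and the snippet needs the surrounding project on the classpath.

// Hedged usage sketch (not standalone): tokenize a query expression, then parse it
// into a Predicate. The QueryLexer/QueryParser class names are assumed.
QueryLexer lexer = new QueryLexer();
QueryParser parser = new QueryParser();

Token[] tokens = lexer.tokens("Hosts/host_name=h1|Hosts/host_name=h2");
Predicate predicate = parser.parse(tokens); // null when the expression yields no predicate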
223551095_1906
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  HttpChannel connection = HttpConnection.getCurrentConnection().getHttpChannel();
  connection.getRequest().setHandled(true);
  response.setContentType(MimeTypes.Type.TEXT_PLAIN.asString());

  Map<String, Object> errorMap = new LinkedHashMap<>();
  int code = connection.getResponse().getStatus();
  errorMap.put("status", code);
  String message = connection.getResponse().getReason();
  if (message == null) {
    message = HttpStatus.getMessage(code);
  }
  errorMap.put("message", message);

  if ((code == HttpServletResponse.SC_FORBIDDEN) || (code == HttpServletResponse.SC_UNAUTHORIZED)) {
    // if SSO is configured we should provide info about it in case of access error
    JwtAuthenticationProperties jwtProperties = jwtAuthenticationPropertiesProvider.get();
    if ((jwtProperties != null) && jwtProperties.isEnabledForAmbari()) {
      String providerUrl = jwtProperties.getAuthenticationProviderUrl();
      String originalUrl = jwtProperties.getOriginalUrlQueryParam();
      if (StringUtils.isEmpty(providerUrl)) {
        LOG.warn("The SSO provider URL is not available, forwarding to the SSO provider is not possible");
      } else if (StringUtils.isEmpty(originalUrl)) {
        LOG.warn("The original URL parameter name is not available, forwarding to the SSO provider is not possible");
      } else {
        errorMap.put("jwtProviderUrl", String.format("%s?%s=", providerUrl, originalUrl));
      }
    }
  }

  gson.toJson(errorMap, response.getWriter());
}
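To make the wire format concrete, here is a small runnable sketch that rebuilds the same error map and serializes it with Gson. The values are illustrative, and the jwtProviderUrl key only appears for 401/403 responses when SSO is fully configured.

import com.google.gson.Gson;

import java.util.LinkedHashMap;
import java.util.Map;

public class ErrorBodyDemo {
  public static void main(String[] args) {
    // Mirrors the map built by the handler above; the values here are made up for illustration.
    Map<String, Object> errorMap = new LinkedHashMap<>();
    errorMap.put("status", 403);
    errorMap.put("message", "Forbidden");
    // Added only for 401/403 when the SSO provider URL and original-URL parameter are both set.
    errorMap.put("jwtProviderUrl", "https://sso.example.com/login?originalUrl=");

    System.out.println(new Gson().toJson(errorMap));
    // {"status":403,"message":"Forbidden","jwtProviderUrl":"https://sso.example.com/login?originalUrl="}
  }
}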
223551095_1913
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
    throws IOException, ServletException {
  if (request instanceof HttpServletRequest) {
    HttpServletRequest httpServletRequest = (HttpServletRequest) request;
    String contentType = httpServletRequest.getContentType();

    if (contentType != null && contentType.startsWith(MediaType.APPLICATION_JSON)
        && !isUrlExcluded(httpServletRequest.getPathInfo())) {
      ContentTypeOverrideRequestWrapper requestWrapper = new ContentTypeOverrideRequestWrapper(httpServletRequest);
      ContentTypeOverrideResponseWrapper responseWrapper =
          new ContentTypeOverrideResponseWrapper((HttpServletResponse) response);
      chain.doFilter(requestWrapper, responseWrapper);
      return;
    }
  }
  chain.doFilter(request, response);
}
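ContentTypeOverrideRequestWrapper and ContentTypeOverrideResponseWrapper are not included in this excerpt; presumably they rewrite the advertised Content-Type so downstream code sees the override. A minimal sketch of the request side follows, with the target media type as a placeholder assumption:

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;

// Hypothetical reconstruction: report an overridden Content-Type to downstream consumers.
// The actual target type used by the real wrapper is an assumption here.
class ContentTypeOverrideRequestWrapper extends HttpServletRequestWrapper {

  private static final String OVERRIDDEN_TYPE = "text/plain"; // placeholder target type

  ContentTypeOverrideRequestWrapper(HttpServletRequest request) {
    super(request);
  }

  @Override
  public String getContentType() {
    return OVERRIDDEN_TYPE;
  }

  @Override
  public String getHeader(String name) {
    return "Content-Type".equalsIgnoreCase(name) ? OVERRIDDEN_TYPE : super.getHeader(name);
  }
}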
223551095_1914
public List<Stage> getActions(long requestId) { return db.getAllStages(requestId); }
223551095_1915
public String getRequestContext() { return requestContext; }
223551095_1916
@Override
@Transactional
@TransactionalLock(lockArea = LockArea.HRC_STATUS_CACHE, lockType = LockType.WRITE)
public void persistActions(Request request) throws AmbariException {
  RequestEntity requestEntity = request.constructNewPersistenceEntity();

  Long clusterId = -1L;
  String clusterName = null;
  Long requestId = requestEntity.getRequestId();
  ClusterEntity clusterEntity = clusterDAO.findById(request.getClusterId());
  if (clusterEntity != null) {
    clusterId = clusterEntity.getClusterId();
    clusterName = clusterEntity.getClusterName();
  }

  requestEntity.setClusterId(clusterId);
  requestDAO.create(requestEntity);

  //TODO wire request to cluster
  List<StageEntity> stageEntities = new ArrayList<>(request.getStages().size());

  addRequestToAuditlogCache(request);

  List<HostRoleCommand> hostRoleCommands = new ArrayList<>();

  for (Stage stage : request.getStages()) {
    StageEntity stageEntity = stage.constructNewPersistenceEntity();
    Long stageId = stageEntity.getStageId();
    stageEntities.add(stageEntity);
    stageEntity.setClusterId(clusterId);
    stageEntity.setRequest(requestEntity);
    stageDAO.create(stageEntity);

    List<HostRoleCommand> orderedHostRoleCommands = stage.getOrderedHostRoleCommands();
    List<HostRoleCommandEntity> hostRoleCommandEntities = new ArrayList<>();

    for (HostRoleCommand hostRoleCommand : orderedHostRoleCommands) {
      hostRoleCommand.setRequestId(requestId);
      hostRoleCommand.setStageId(stageId);
      HostRoleCommandEntity hostRoleCommandEntity = hostRoleCommand.constructNewPersistenceEntity();
      hostRoleCommandEntity.setStage(stageEntity);
      hostRoleCommandDAO.create(hostRoleCommandEntity);
      hostRoleCommandEntities.add(hostRoleCommandEntity);

      hostRoleCommand.setTaskId(hostRoleCommandEntity.getTaskId());

      String prefix = "";
      String output = "output-" + hostRoleCommandEntity.getTaskId() + ".txt";
      String error = "errors-" + hostRoleCommandEntity.getTaskId() + ".txt";

      HostEntity hostEntity = null;
      if (null != hostRoleCommandEntity.getHostId()) {
        hostEntity = hostDAO.findById(hostRoleCommandEntity.getHostId());
        if (hostEntity == null) {
          String msg = String.format("Host %s doesn't exist in database", hostRoleCommandEntity.getHostName());
          LOG.error(msg);
          throw new AmbariException(msg);
        }

        hostRoleCommandEntity.setHostEntity(hostEntity);

        try {
          // Get the in-memory host object and its prefix to construct the output and error log paths.
          Host hostObject = clusters.getHost(hostEntity.getHostName());
          if (!StringUtils.isBlank(hostObject.getPrefix())) {
            prefix = hostObject.getPrefix();
            if (!prefix.endsWith("/")) {
              prefix = prefix + "/";
            }
          }
        } catch (AmbariException e) {
          LOG.warn("Exception in getting prefix for host and setting output and error log files. Using no prefix");
        }
      }

      hostRoleCommand.setOutputLog(prefix + output);
      hostRoleCommand.setErrorLog(prefix + error);
      hostRoleCommandEntity.setOutputLog(hostRoleCommand.getOutputLog());
      hostRoleCommandEntity.setErrorLog(hostRoleCommand.getErrorLog());

      ExecutionCommandEntity executionCommandEntity = hostRoleCommand.constructExecutionCommandEntity();
      executionCommandEntity.setHostRoleCommand(hostRoleCommandEntity);
      executionCommandEntity.setTaskId(hostRoleCommandEntity.getTaskId());
      hostRoleCommandEntity.setExecutionCommand(executionCommandEntity);

      executionCommandDAO.create(hostRoleCommandEntity.getExecutionCommand());
      hostRoleCommandEntity = hostRoleCommandDAO.mergeWithoutPublishEvent(hostRoleCommandEntity);
      if (null != hostEntity) {
        hostEntity = hostDAO.merge(hostEntity);
      }
      hostRoleCommands.add(hostRoleCommand);
    }

    for (RoleSuccessCriteriaEntity roleSuccessCriteriaEntity : stageEntity.getRoleSuccessCriterias()) {
      roleSuccessCriteriaDAO.create(roleSuccessCriteriaEntity);
    }

    stageEntity.setHostRoleCommands(hostRoleCommandEntities);
    stageEntity = stageDAO.merge(stageEntity);
  }

  requestEntity.setStages(stageEntities);
  requestDAO.merge(requestEntity);

  TaskCreateEvent taskCreateEvent = new TaskCreateEvent(hostRoleCommands);
  taskEventPublisher.publish(taskCreateEvent);

  List<HostRoleCommandEntity> hostRoleCommandEntities = hostRoleCommandDAO.findByRequest(requestEntity.getRequestId());

  // The "requests" STOMP topic is used for cluster-related requests only.
  // Requests without clusters (like host checks) should be posted to a divided topic.
  if (clusterName != null) {
    STOMPUpdatePublisher.publish(new RequestUpdateEvent(requestEntity,
        hostRoleCommandDAO, topologyManager, clusterName, hostRoleCommandEntities));
  } else {
    LOG.debug("No STOMP request update event was fired for the new request because it is not related to a cluster, "
        + "request id: {}, command name: {}", requestEntity.getRequestId(), requestEntity.getCommandName());
  }
}
223551095_1918
@Override
@Transactional
public void hostRoleScheduled(Stage s, String hostname, String roleStr) {
  HostRoleCommand hostRoleCommand = s.getHostRoleCommand(hostname, roleStr);
  HostRoleCommandEntity entity = hostRoleCommandDAO.findByPK(hostRoleCommand.getTaskId());
  if (entity != null) {
    entity.setStartTime(hostRoleCommand.getStartTime());
    if (entity.getOriginalStartTime() == null || entity.getOriginalStartTime() == -1) {
      entity.setOriginalStartTime(System.currentTimeMillis());
    }
    entity.setLastAttemptTime(hostRoleCommand.getLastAttemptTime());
    entity.setStatus(hostRoleCommand.getStatus());
    entity.setAttemptCount(hostRoleCommand.getAttemptCount());

    auditLog(entity, s.getRequestId());

    hostRoleCommandDAO.merge(entity);
  } else {
    throw new RuntimeException("HostRoleCommand is not persisted, cannot update:\n" + hostRoleCommand);
  }
}
223551095_1919
@Override
public List<Long> getRequestsByStatus(RequestStatus status, int maxResults, boolean ascOrder) {
  if (null == status) {
    return requestDAO.findAllRequestIds(maxResults, ascOrder);
  }

  EnumSet<HostRoleStatus> taskStatuses = null;
  switch (status) {
    case IN_PROGRESS:
      taskStatuses = HostRoleStatus.IN_PROGRESS_STATUSES;
      break;
    case FAILED:
      taskStatuses = HostRoleStatus.FAILED_STATUSES;
      break;
    case COMPLETED:
      // !!! COMPLETED is special as all tasks in the request must be completed
      return hostRoleCommandDAO.getCompletedRequests(maxResults, ascOrder);
  }

  return hostRoleCommandDAO.getRequestsByTaskStatus(taskStatuses, maxResults, ascOrder);
}
223551095_1920
@Override public List<Long> getRequestsByStatus(RequestStatus status, int maxResults, boolean ascOrder) { if (null == status) { return requestDAO.findAllRequestIds(maxResults, ascOrder); } EnumSet<HostRoleStatus> taskStatuses = null; switch( status ){ case IN_PROGRESS: taskStatuses = HostRoleStatus.IN_PROGRESS_STATUSES; break; case FAILED: taskStatuses = HostRoleStatus.FAILED_STATUSES; break; case COMPLETED: // !!! COMPLETED is special as all tasks in the request must be // completed return hostRoleCommandDAO.getCompletedRequests(maxResults, ascOrder); } return hostRoleCommandDAO.getRequestsByTaskStatus(taskStatuses, maxResults, ascOrder); }
223551095_1921
@Override public List<Long> getRequestsByStatus(RequestStatus status, int maxResults, boolean ascOrder) { if (null == status) { return requestDAO.findAllRequestIds(maxResults, ascOrder); } EnumSet<HostRoleStatus> taskStatuses = null; switch( status ){ case IN_PROGRESS: taskStatuses = HostRoleStatus.IN_PROGRESS_STATUSES; break; case FAILED: taskStatuses = HostRoleStatus.FAILED_STATUSES; break; case COMPLETED: // !!! COMPLETED is special as all tasks in the request must be // completed return hostRoleCommandDAO.getCompletedRequests(maxResults, ascOrder); } return hostRoleCommandDAO.getRequestsByTaskStatus(taskStatuses, maxResults, ascOrder); }
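The status fan-out above is worth spelling out: a null status lists all requests, IN_PROGRESS and FAILED are translated into sets of task statuses, and COMPLETED takes a dedicated query because every task in the request must be complete. A self-contained sketch with simplified, hypothetical types; the set members below are assumptions standing in for HostRoleStatus.IN_PROGRESS_STATUSES and HostRoleStatus.FAILED_STATUSES:

import java.util.EnumSet;

public class RequestStatusFanOut {
    enum RequestStatus { IN_PROGRESS, FAILED, COMPLETED }
    enum TaskStatus { PENDING, QUEUED, IN_PROGRESS, FAILED, TIMEDOUT, ABORTED, COMPLETED }

    // Mirrors the switch above; null means "COMPLETED: use the dedicated completed-requests query".
    static EnumSet<TaskStatus> taskStatusesFor(RequestStatus status) {
        switch (status) {
            case IN_PROGRESS:
                return EnumSet.of(TaskStatus.PENDING, TaskStatus.QUEUED, TaskStatus.IN_PROGRESS); // assumed members
            case FAILED:
                return EnumSet.of(TaskStatus.FAILED, TaskStatus.TIMEDOUT, TaskStatus.ABORTED); // assumed members
            default:
                return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(taskStatusesFor(RequestStatus.IN_PROGRESS));
        System.out.println(taskStatusesFor(RequestStatus.COMPLETED)); // null -> completed-requests path
    }
}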
223551095_1922
@Override @Transactional @TransactionalLock(lockArea = LockArea.HRC_STATUS_CACHE, lockType = LockType.WRITE) public void persistActions(Request request) throws AmbariException { RequestEntity requestEntity = request.constructNewPersistenceEntity(); Long clusterId = -1L; String clusterName = null; Long requestId = requestEntity.getRequestId(); ClusterEntity clusterEntity = clusterDAO.findById(request.getClusterId()); if (clusterEntity != null) { clusterId = clusterEntity.getClusterId(); clusterName = clusterEntity.getClusterName(); } requestEntity.setClusterId(clusterId); requestDAO.create(requestEntity); //TODO wire request to cluster List<StageEntity> stageEntities = new ArrayList<>(request.getStages().size()); addRequestToAuditlogCache(request); List<HostRoleCommand> hostRoleCommands = new ArrayList<>(); for (Stage stage : request.getStages()) { StageEntity stageEntity = stage.constructNewPersistenceEntity(); Long stageId = stageEntity.getStageId(); stageEntities.add(stageEntity); stageEntity.setClusterId(clusterId); stageEntity.setRequest(requestEntity); stageDAO.create(stageEntity); List<HostRoleCommand> orderedHostRoleCommands = stage.getOrderedHostRoleCommands(); List<HostRoleCommandEntity> hostRoleCommandEntities = new ArrayList<>(); for (HostRoleCommand hostRoleCommand : orderedHostRoleCommands) { hostRoleCommand.setRequestId(requestId); hostRoleCommand.setStageId(stageId); HostRoleCommandEntity hostRoleCommandEntity = hostRoleCommand.constructNewPersistenceEntity(); hostRoleCommandEntity.setStage(stageEntity); hostRoleCommandDAO.create(hostRoleCommandEntity); hostRoleCommandEntities.add(hostRoleCommandEntity); hostRoleCommand.setTaskId(hostRoleCommandEntity.getTaskId()); String prefix = ""; String output = "output-" + hostRoleCommandEntity.getTaskId() + ".txt"; String error = "errors-" + hostRoleCommandEntity.getTaskId() + ".txt"; HostEntity hostEntity = null; if (null != hostRoleCommandEntity.getHostId()) { hostEntity = hostDAO.findById(hostRoleCommandEntity.getHostId()); if (hostEntity == null) { String msg = String.format("Host %s doesn't exist in database", hostRoleCommandEntity.getHostName()); LOG.error(msg); throw new AmbariException(msg); } hostRoleCommandEntity.setHostEntity(hostEntity); try { // Get the in-memory host object and its prefix to construct the output and error log paths. Host hostObject = clusters.getHost(hostEntity.getHostName()); if (!StringUtils.isBlank(hostObject.getPrefix())) { prefix = hostObject.getPrefix(); if (!prefix.endsWith("/")) { prefix = prefix + "/"; } } } catch (AmbariException e) { LOG.warn("Exception in getting prefix for host and setting output and error log files. Using no prefix"); } } hostRoleCommand.setOutputLog(prefix + output); hostRoleCommand.setErrorLog(prefix + error); hostRoleCommandEntity.setOutputLog(hostRoleCommand.getOutputLog()); hostRoleCommandEntity.setErrorLog(hostRoleCommand.getErrorLog()); ExecutionCommandEntity executionCommandEntity = hostRoleCommand.constructExecutionCommandEntity(); executionCommandEntity.setHostRoleCommand(hostRoleCommandEntity); executionCommandEntity.setTaskId(hostRoleCommandEntity.getTaskId()); hostRoleCommandEntity.setExecutionCommand(executionCommandEntity); executionCommandDAO.create(hostRoleCommandEntity.getExecutionCommand()); hostRoleCommandEntity = hostRoleCommandDAO.mergeWithoutPublishEvent(hostRoleCommandEntity); if (null != hostEntity) { hostEntity = hostDAO.merge(hostEntity); } hostRoleCommands.add(hostRoleCommand); } for (RoleSuccessCriteriaEntity roleSuccessCriteriaEntity : stageEntity.getRoleSuccessCriterias()) { roleSuccessCriteriaDAO.create(roleSuccessCriteriaEntity); } stageEntity.setHostRoleCommands(hostRoleCommandEntities); stageEntity = stageDAO.merge(stageEntity); } requestEntity.setStages(stageEntities); requestDAO.merge(requestEntity); TaskCreateEvent taskCreateEvent = new TaskCreateEvent(hostRoleCommands); taskEventPublisher.publish(taskCreateEvent); List<HostRoleCommandEntity> hostRoleCommandEntities = hostRoleCommandDAO.findByRequest(requestEntity.getRequestId()); // "requests" STOMP topic is used for clusters related requests only. // Requests without clusters (like host checks) should be posted to divided topic. if (clusterName != null) { STOMPUpdatePublisher.publish(new RequestUpdateEvent(requestEntity, hostRoleCommandDAO, topologyManager, clusterName, hostRoleCommandEntities)); } else { LOG.debug("No STOMP request update event was fired for new request due no cluster related, " + "request id: {}, command name: {}", requestEntity.getRequestId(), requestEntity.getCommandName()); } }
223551095_1923
@Override @Transactional @TransactionalLock(lockArea = LockArea.HRC_STATUS_CACHE, lockType = LockType.WRITE) public void persistActions(Request request) throws AmbariException { RequestEntity requestEntity = request.constructNewPersistenceEntity(); Long clusterId = -1L; String clusterName = null; Long requestId = requestEntity.getRequestId(); ClusterEntity clusterEntity = clusterDAO.findById(request.getClusterId()); if (clusterEntity != null) { clusterId = clusterEntity.getClusterId(); clusterName = clusterEntity.getClusterName(); } requestEntity.setClusterId(clusterId); requestDAO.create(requestEntity); //TODO wire request to cluster List<StageEntity> stageEntities = new ArrayList<>(request.getStages().size()); addRequestToAuditlogCache(request); List<HostRoleCommand> hostRoleCommands = new ArrayList<>(); for (Stage stage : request.getStages()) { StageEntity stageEntity = stage.constructNewPersistenceEntity(); Long stageId = stageEntity.getStageId(); stageEntities.add(stageEntity); stageEntity.setClusterId(clusterId); stageEntity.setRequest(requestEntity); stageDAO.create(stageEntity); List<HostRoleCommand> orderedHostRoleCommands = stage.getOrderedHostRoleCommands(); List<HostRoleCommandEntity> hostRoleCommandEntities = new ArrayList<>(); for (HostRoleCommand hostRoleCommand : orderedHostRoleCommands) { hostRoleCommand.setRequestId(requestId); hostRoleCommand.setStageId(stageId); HostRoleCommandEntity hostRoleCommandEntity = hostRoleCommand.constructNewPersistenceEntity(); hostRoleCommandEntity.setStage(stageEntity); hostRoleCommandDAO.create(hostRoleCommandEntity); hostRoleCommandEntities.add(hostRoleCommandEntity); hostRoleCommand.setTaskId(hostRoleCommandEntity.getTaskId()); String prefix = ""; String output = "output-" + hostRoleCommandEntity.getTaskId() + ".txt"; String error = "errors-" + hostRoleCommandEntity.getTaskId() + ".txt"; HostEntity hostEntity = null; if (null != hostRoleCommandEntity.getHostId()) { hostEntity = hostDAO.findById(hostRoleCommandEntity.getHostId()); if (hostEntity == null) { String msg = String.format("Host %s doesn't exist in database", hostRoleCommandEntity.getHostName()); LOG.error(msg); throw new AmbariException(msg); } hostRoleCommandEntity.setHostEntity(hostEntity); try { // Get the in-memory host object and its prefix to construct the output and error log paths. Host hostObject = clusters.getHost(hostEntity.getHostName()); if (!StringUtils.isBlank(hostObject.getPrefix())) { prefix = hostObject.getPrefix(); if (!prefix.endsWith("/")) { prefix = prefix + "/"; } } } catch (AmbariException e) { LOG.warn("Exception in getting prefix for host and setting output and error log files. Using no prefix"); } } hostRoleCommand.setOutputLog(prefix + output); hostRoleCommand.setErrorLog(prefix + error); hostRoleCommandEntity.setOutputLog(hostRoleCommand.getOutputLog()); hostRoleCommandEntity.setErrorLog(hostRoleCommand.getErrorLog()); ExecutionCommandEntity executionCommandEntity = hostRoleCommand.constructExecutionCommandEntity(); executionCommandEntity.setHostRoleCommand(hostRoleCommandEntity); executionCommandEntity.setTaskId(hostRoleCommandEntity.getTaskId()); hostRoleCommandEntity.setExecutionCommand(executionCommandEntity); executionCommandDAO.create(hostRoleCommandEntity.getExecutionCommand()); hostRoleCommandEntity = hostRoleCommandDAO.mergeWithoutPublishEvent(hostRoleCommandEntity); if (null != hostEntity) { hostEntity = hostDAO.merge(hostEntity); } hostRoleCommands.add(hostRoleCommand); } for (RoleSuccessCriteriaEntity roleSuccessCriteriaEntity : stageEntity.getRoleSuccessCriterias()) { roleSuccessCriteriaDAO.create(roleSuccessCriteriaEntity); } stageEntity.setHostRoleCommands(hostRoleCommandEntities); stageEntity = stageDAO.merge(stageEntity); } requestEntity.setStages(stageEntities); requestDAO.merge(requestEntity); TaskCreateEvent taskCreateEvent = new TaskCreateEvent(hostRoleCommands); taskEventPublisher.publish(taskCreateEvent); List<HostRoleCommandEntity> hostRoleCommandEntities = hostRoleCommandDAO.findByRequest(requestEntity.getRequestId()); // "requests" STOMP topic is used for clusters related requests only. // Requests without clusters (like host checks) should be posted to divided topic. if (clusterName != null) { STOMPUpdatePublisher.publish(new RequestUpdateEvent(requestEntity, hostRoleCommandDAO, topologyManager, clusterName, hostRoleCommandEntities)); } else { LOG.debug("No STOMP request update event was fired for new request due no cluster related, " + "request id: {}, command name: {}", requestEntity.getRequestId(), requestEntity.getCommandName()); } }
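A standalone sketch of the log-path construction in persistActions above (the helper is hypothetical; the real code inlines this logic): an optional per-host prefix is normalized to end with "/" and prepended to the per-task output-<taskId>.txt and errors-<taskId>.txt names, and a failed prefix lookup falls back to the bare file name:

public class TaskLogPathSketch {

    // Builds "prefix/output-<taskId>.txt" style paths; a blank prefix means no prefix.
    static String logPath(String hostPrefix, long taskId, String kind) {
        String prefix = "";
        if (hostPrefix != null && !hostPrefix.trim().isEmpty()) {
            prefix = hostPrefix.endsWith("/") ? hostPrefix : hostPrefix + "/";
        }
        return prefix + kind + "-" + taskId + ".txt";
    }

    public static void main(String[] args) {
        System.out.println(logPath("/var/lib/ambari-agent/data", 42, "output")); // /var/lib/ambari-agent/data/output-42.txt
        System.out.println(logPath(null, 42, "errors")); // errors-42.txt
    }
}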
223551095_1924
public ExecutionCommand getExecutionCommand() { if (executionCommand != null) { return executionCommand; } if( null == jsonExecutionCommand ){ throw new RuntimeException( "Invalid ExecutionCommandWrapper, both object and string representations are null"); } try { executionCommand = gson.fromJson(jsonExecutionCommand, ExecutionCommand.class); // sanity; if no configurations, just initialize to prevent NPEs if (null == executionCommand.getConfigurations()) { executionCommand.setConfigurations(new TreeMap<>()); } Map<String, Map<String, String>> configurations = executionCommand.getConfigurations(); // For a configuration type, both tag and an actual configuration can be stored // Configurations from the tag is always expanded and then over-written by the actual // global:version1:{a1:A1,b1:B1,d1:D1} + global:{a1:A2,c1:C1,DELETED_d1:x} ==> // global:{a1:A2,b1:B1,c1:C1} Long clusterId = hostRoleCommandDAO.findByPK( executionCommand.getTaskId()).getStage().getClusterId(); Cluster cluster = clusters.getClusterById(clusterId); // Execution commands may have config-tags already set during their creation. // However, these tags become stale at runtime when other // ExecutionCommands run and change the desired configs (like // ConfigureAction). Hence an ExecutionCommand can specify which // config-types should be refreshed at runtime. Specifying <code>*</code> // will result in all config-type tags to be refreshed to the latest // cluster desired-configs. Additionally, there may be no configuration // tags set but refresh might be set to *. In this case, they should still // be refreshed with the latest. boolean refreshConfigTagsBeforeExecution = executionCommand.getForceRefreshConfigTagsBeforeExecution(); if (refreshConfigTagsBeforeExecution) { Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs(); Map<String, Map<String, String>> configurationTags = configHelper.getEffectiveDesiredTags( cluster, executionCommand.getHostname(), desiredConfigs); LOG.debug( "While scheduling task {} on cluster {}, configurations are being refreshed using desired configurations of {}", executionCommand.getTaskId(), cluster.getClusterName(), desiredConfigs); // then clear out any existing configurations so that all of the new // configurations are forcefully applied configurations.clear(); executionCommand.setConfigurationTags(configurationTags); } // now that the tags have been updated (if necessary), fetch the // configurations Map<String, Map<String, String>> configurationTags = executionCommand.getConfigurationTags(); configHelper.getAndMergeHostConfigs(configurations, configurationTags, cluster); configHelper.getAndMergeHostConfigAttributes(executionCommand.getConfigurationAttributes(), configurationTags, cluster); setVersions(cluster); // provide some basic information about a cluster upgrade if there is one // in progress UpgradeEntity upgrade = cluster.getUpgradeInProgress(); if (null != upgrade) { UpgradeContext upgradeContext = upgradeContextFactory.create(cluster, upgrade); UpgradeSummary upgradeSummary = upgradeContext.getUpgradeSummary(); executionCommand.setUpgradeSummary(upgradeSummary); } // setting repositoryFile final Host host = cluster.getHost(executionCommand.getHostname()); // can be null on internal commands final String serviceName = executionCommand.getServiceName(); // can be null on executing special RU tasks if (null == executionCommand.getRepositoryFile() && null != host && null != serviceName) { final CommandRepository commandRepository; final Service service = 
cluster.getService(serviceName); final String componentName = executionCommand.getComponentName(); try { if (null != componentName) { ServiceComponent serviceComponent = service.getServiceComponent(componentName); commandRepository = repoVersionHelper.getCommandRepository(cluster, serviceComponent, host); } else { RepositoryVersionEntity repoVersion = service.getDesiredRepositoryVersion(); RepoOsEntity osEntity = repoVersionHelper.getOSEntityForHost(host, repoVersion); commandRepository = repoVersionHelper.getCommandRepository(repoVersion, osEntity); } executionCommand.setRepositoryFile(commandRepository); } catch (SystemException e) { LOG.debug("Unable to find command repository with a correct operating system for host {}", host, e); } } } catch (ClusterNotFoundException cnfe) { // it's possible that there are commands without clusters; in such cases, // just return the de-serialized command and don't try to read configs LOG.warn( "Unable to lookup the cluster by ID; assuming that there is no cluster and therefore no configs for this execution command: {}", cnfe.getMessage()); return executionCommand; } catch (AmbariException e) { throw new RuntimeException(e); } return executionCommand; }
223551095_1925
public ExecutionCommand getExecutionCommand() { if (executionCommand != null) { return executionCommand; } if( null == jsonExecutionCommand ){ throw new RuntimeException( "Invalid ExecutionCommandWrapper, both object and string representations are null"); } try { executionCommand = gson.fromJson(jsonExecutionCommand, ExecutionCommand.class); // sanity; if no configurations, just initialize to prevent NPEs if (null == executionCommand.getConfigurations()) { executionCommand.setConfigurations(new TreeMap<>()); } Map<String, Map<String, String>> configurations = executionCommand.getConfigurations(); // For a configuration type, both tag and an actual configuration can be stored // Configurations from the tag is always expanded and then over-written by the actual // global:version1:{a1:A1,b1:B1,d1:D1} + global:{a1:A2,c1:C1,DELETED_d1:x} ==> // global:{a1:A2,b1:B1,c1:C1} Long clusterId = hostRoleCommandDAO.findByPK( executionCommand.getTaskId()).getStage().getClusterId(); Cluster cluster = clusters.getClusterById(clusterId); // Execution commands may have config-tags already set during their creation. // However, these tags become stale at runtime when other // ExecutionCommands run and change the desired configs (like // ConfigureAction). Hence an ExecutionCommand can specify which // config-types should be refreshed at runtime. Specifying <code>*</code> // will result in all config-type tags to be refreshed to the latest // cluster desired-configs. Additionally, there may be no configuration // tags set but refresh might be set to *. In this case, they should still // be refreshed with the latest. boolean refreshConfigTagsBeforeExecution = executionCommand.getForceRefreshConfigTagsBeforeExecution(); if (refreshConfigTagsBeforeExecution) { Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs(); Map<String, Map<String, String>> configurationTags = configHelper.getEffectiveDesiredTags( cluster, executionCommand.getHostname(), desiredConfigs); LOG.debug( "While scheduling task {} on cluster {}, configurations are being refreshed using desired configurations of {}", executionCommand.getTaskId(), cluster.getClusterName(), desiredConfigs); // then clear out any existing configurations so that all of the new // configurations are forcefully applied configurations.clear(); executionCommand.setConfigurationTags(configurationTags); } // now that the tags have been updated (if necessary), fetch the // configurations Map<String, Map<String, String>> configurationTags = executionCommand.getConfigurationTags(); configHelper.getAndMergeHostConfigs(configurations, configurationTags, cluster); configHelper.getAndMergeHostConfigAttributes(executionCommand.getConfigurationAttributes(), configurationTags, cluster); setVersions(cluster); // provide some basic information about a cluster upgrade if there is one // in progress UpgradeEntity upgrade = cluster.getUpgradeInProgress(); if (null != upgrade) { UpgradeContext upgradeContext = upgradeContextFactory.create(cluster, upgrade); UpgradeSummary upgradeSummary = upgradeContext.getUpgradeSummary(); executionCommand.setUpgradeSummary(upgradeSummary); } // setting repositoryFile final Host host = cluster.getHost(executionCommand.getHostname()); // can be null on internal commands final String serviceName = executionCommand.getServiceName(); // can be null on executing special RU tasks if (null == executionCommand.getRepositoryFile() && null != host && null != serviceName) { final CommandRepository commandRepository; final Service service = 
cluster.getService(serviceName); final String componentName = executionCommand.getComponentName(); try { if (null != componentName) { ServiceComponent serviceComponent = service.getServiceComponent(componentName); commandRepository = repoVersionHelper.getCommandRepository(cluster, serviceComponent, host); } else { RepositoryVersionEntity repoVersion = service.getDesiredRepositoryVersion(); RepoOsEntity osEntity = repoVersionHelper.getOSEntityForHost(host, repoVersion); commandRepository = repoVersionHelper.getCommandRepository(repoVersion, osEntity); } executionCommand.setRepositoryFile(commandRepository); } catch (SystemException e) { LOG.debug("Unable to find command repository with a correct operating system for host {}", host, e); } } } catch (ClusterNotFoundException cnfe) { // it's possible that there are commands without clusters; in such cases, // just return the de-serialized command and don't try to read configs LOG.warn( "Unable to lookup the cluster by ID; assuming that there is no cluster and therefore no configs for this execution command: {}", cnfe.getMessage()); return executionCommand; } catch (AmbariException e) { throw new RuntimeException(e); } return executionCommand; }
223551095_1926
public ExecutionCommand getExecutionCommand() { if (executionCommand != null) { return executionCommand; } if( null == jsonExecutionCommand ){ throw new RuntimeException( "Invalid ExecutionCommandWrapper, both object and string representations are null"); } try { executionCommand = gson.fromJson(jsonExecutionCommand, ExecutionCommand.class); // sanity; if no configurations, just initialize to prevent NPEs if (null == executionCommand.getConfigurations()) { executionCommand.setConfigurations(new TreeMap<>()); } Map<String, Map<String, String>> configurations = executionCommand.getConfigurations(); // For a configuration type, both tag and an actual configuration can be stored // Configurations from the tag is always expanded and then over-written by the actual // global:version1:{a1:A1,b1:B1,d1:D1} + global:{a1:A2,c1:C1,DELETED_d1:x} ==> // global:{a1:A2,b1:B1,c1:C1} Long clusterId = hostRoleCommandDAO.findByPK( executionCommand.getTaskId()).getStage().getClusterId(); Cluster cluster = clusters.getClusterById(clusterId); // Execution commands may have config-tags already set during their creation. // However, these tags become stale at runtime when other // ExecutionCommands run and change the desired configs (like // ConfigureAction). Hence an ExecutionCommand can specify which // config-types should be refreshed at runtime. Specifying <code>*</code> // will result in all config-type tags to be refreshed to the latest // cluster desired-configs. Additionally, there may be no configuration // tags set but refresh might be set to *. In this case, they should still // be refreshed with the latest. boolean refreshConfigTagsBeforeExecution = executionCommand.getForceRefreshConfigTagsBeforeExecution(); if (refreshConfigTagsBeforeExecution) { Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs(); Map<String, Map<String, String>> configurationTags = configHelper.getEffectiveDesiredTags( cluster, executionCommand.getHostname(), desiredConfigs); LOG.debug( "While scheduling task {} on cluster {}, configurations are being refreshed using desired configurations of {}", executionCommand.getTaskId(), cluster.getClusterName(), desiredConfigs); // then clear out any existing configurations so that all of the new // configurations are forcefully applied configurations.clear(); executionCommand.setConfigurationTags(configurationTags); } // now that the tags have been updated (if necessary), fetch the // configurations Map<String, Map<String, String>> configurationTags = executionCommand.getConfigurationTags(); configHelper.getAndMergeHostConfigs(configurations, configurationTags, cluster); configHelper.getAndMergeHostConfigAttributes(executionCommand.getConfigurationAttributes(), configurationTags, cluster); setVersions(cluster); // provide some basic information about a cluster upgrade if there is one // in progress UpgradeEntity upgrade = cluster.getUpgradeInProgress(); if (null != upgrade) { UpgradeContext upgradeContext = upgradeContextFactory.create(cluster, upgrade); UpgradeSummary upgradeSummary = upgradeContext.getUpgradeSummary(); executionCommand.setUpgradeSummary(upgradeSummary); } // setting repositoryFile final Host host = cluster.getHost(executionCommand.getHostname()); // can be null on internal commands final String serviceName = executionCommand.getServiceName(); // can be null on executing special RU tasks if (null == executionCommand.getRepositoryFile() && null != host && null != serviceName) { final CommandRepository commandRepository; final Service service = 
cluster.getService(serviceName); final String componentName = executionCommand.getComponentName(); try { if (null != componentName) { ServiceComponent serviceComponent = service.getServiceComponent(componentName); commandRepository = repoVersionHelper.getCommandRepository(cluster, serviceComponent, host); } else { RepositoryVersionEntity repoVersion = service.getDesiredRepositoryVersion(); RepoOsEntity osEntity = repoVersionHelper.getOSEntityForHost(host, repoVersion); commandRepository = repoVersionHelper.getCommandRepository(repoVersion, osEntity); } executionCommand.setRepositoryFile(commandRepository); } catch (SystemException e) { LOG.debug("Unable to find command repository with a correct operating system for host {}", host, e); } } } catch (ClusterNotFoundException cnfe) { // it's possible that there are commands without clusters; in such cases, // just return the de-serialized command and don't try to read configs LOG.warn( "Unable to lookup the cluster by ID; assuming that there is no cluster and therefore no configs for this execution command: {}", cnfe.getMessage()); return executionCommand; } catch (AmbariException e) { throw new RuntimeException(e); } return executionCommand; }
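The comment inside getExecutionCommand above documents the configuration overlay rule: tag-derived values are expanded first, then overwritten by actual values, and a key carrying the DELETED_ prefix removes the matching entry. A self-contained sketch of just that rule (hypothetical helper; in Ambari the merge is performed by ConfigHelper):

import java.util.Map;
import java.util.TreeMap;

public class ConfigOverlaySketch {

    // global:version1:{a1:A1,b1:B1,d1:D1} + global:{a1:A2,c1:C1,DELETED_d1:x}
    //   ==> global:{a1:A2,b1:B1,c1:C1}
    static Map<String, String> overlay(Map<String, String> fromTag, Map<String, String> actual) {
        Map<String, String> merged = new TreeMap<>(fromTag);
        for (Map.Entry<String, String> e : actual.entrySet()) {
            if (e.getKey().startsWith("DELETED_")) {
                merged.remove(e.getKey().substring("DELETED_".length()));
            } else {
                merged.put(e.getKey(), e.getValue());
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> tag = new TreeMap<>();
        tag.put("a1", "A1"); tag.put("b1", "B1"); tag.put("d1", "D1");
        Map<String, String> actual = new TreeMap<>();
        actual.put("a1", "A2"); actual.put("c1", "C1"); actual.put("DELETED_d1", "x");
        System.out.println(overlay(tag, actual)); // {a1=A2, b1=B1, c1=C1}
    }
}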
223551095_1927
public void doWork() throws AmbariException { try { unitOfWork.begin(); // grab a reference to this UnitOfWork's EM threadEntityManager = entityManagerProvider.get(); // The first thing to do is to abort requests that are cancelled processCancelledRequestsList(); // !!! getting the stages in progress could be a very expensive call due // to the join being used; there's no need to make it if there are // no commands in progress if (db.getCommandsInProgressCount() == 0) { // Nothing to do if (LOG.isDebugEnabled()) { LOG.debug("There are no stages currently in progress."); } return; } Set<Long> runningRequestIds = new HashSet<>(); List<Stage> firstStageInProgressPerRequest = db.getFirstStageInProgressPerRequest(); if (LOG.isDebugEnabled()) { LOG.debug("Scheduler wakes up"); LOG.debug("Processing {} in progress stages", firstStageInProgressPerRequest.size()); } publishInProgressTasks(firstStageInProgressPerRequest); if (firstStageInProgressPerRequest.isEmpty()) { // Nothing to do if (LOG.isDebugEnabled()) { LOG.debug("There are no stages currently in progress."); } return; } int i_stage = 0; // get the range of requests in progress long iLowestRequestIdInProgress = firstStageInProgressPerRequest.get(0).getRequestId(); long iHighestRequestIdInProgress = firstStageInProgressPerRequest.get( firstStageInProgressPerRequest.size() - 1).getRequestId(); List<String> hostsWithPendingTasks = hostRoleCommandDAO.getHostsWithPendingTasks( iLowestRequestIdInProgress, iHighestRequestIdInProgress); // filter the stages in progress down to those which can be scheduled in // parallel List<Stage> stages = filterParallelPerHostStages(firstStageInProgressPerRequest); boolean exclusiveRequestIsGoing = false; // This loop greatly depends on the fact that order of stages in // a list does not change between invocations for (Stage stage : stages) { // Check if we can process this stage in parallel with another stages i_stage++; long requestId = stage.getRequestId(); LOG.debug("==> STAGE_i = {}(requestId={},StageId={})", i_stage, requestId, stage.getStageId()); RequestEntity request = db.getRequestEntity(requestId); if (request.isExclusive()) { if (runningRequestIds.size() > 0) { // As a result, we will wait until any previous stages are finished LOG.debug("Stage requires exclusive execution, but other requests are already executing. Stopping for now"); break; } exclusiveRequestIsGoing = true; } if (runningRequestIds.contains(requestId)) { // We don't want to process different stages from the same request in parallel LOG.debug("==> We don't want to process different stages from the same request in parallel"); continue; } else { runningRequestIds.add(requestId); if (!requestsInProgress.contains(requestId)) { requestsInProgress.add(requestId); db.startRequest(requestId); } } // Commands that will be scheduled in current scheduler wakeup List<ExecutionCommand> commandsToSchedule = new ArrayList<>(); Multimap<Long, AgentCommand> commandsToEnqueue = ArrayListMultimap.create(); Map<String, RoleStats> roleStats = processInProgressStage(stage, commandsToSchedule, commandsToEnqueue); // Check if stage is failed boolean failed = false; for (Map.Entry<String, RoleStats> entry : roleStats.entrySet()) { String role = entry.getKey(); RoleStats stats = entry.getValue(); if (LOG.isDebugEnabled()) { LOG.debug("Stats for role: {}, stats={}", role, stats); } // only fail the request if the role failed and the stage is not // skippable if (stats.isRoleFailed() && !stage.isSkippable()) { LOG.warn("{} failed, request {} will be aborted", role, request.getRequestId()); failed = true; break; } } if (!failed) { // Prior stage may have failed and it may need to fail the whole request failed = hasPreviousStageFailed(stage); } if (failed) { LOG.error("Operation completely failed, aborting request id: {}", stage.getRequestId()); cancelHostRoleCommands(stage.getOrderedHostRoleCommands(), FAILED_TASK_ABORT_REASONING); abortOperationsForStage(stage); return; } List<ExecutionCommand> commandsToStart = new ArrayList<>(); List<ExecutionCommand> commandsToUpdate = new ArrayList<>(); //Schedule what we have so far for (ExecutionCommand cmd : commandsToSchedule) { ConfigHelper.processHiddenAttribute(cmd.getConfigurations(), cmd.getConfigurationAttributes(), cmd.getRole(), false); processHostRole(request, stage, cmd, commandsToStart, commandsToUpdate); } LOG.debug("==> Commands to start: {}", commandsToStart.size()); LOG.debug("==> Commands to update: {}", commandsToUpdate.size()); //Multimap is analog of Map<Object, List<Object>> but allows to avoid nested loop ListMultimap<String, ServiceComponentHostEvent> eventMap = formEventMap(stage, commandsToStart); Map<ExecutionCommand, String> commandsToAbort = new HashMap<>(); if (!eventMap.isEmpty()) { LOG.debug("==> processing {} serviceComponentHostEvents...", eventMap.size()); Cluster cluster = clusters.getCluster(stage.getClusterName()); if (cluster != null) { Map<ServiceComponentHostEvent, String> failedEvents = cluster.processServiceComponentHostEvents(eventMap); if (failedEvents.size() > 0) { LOG.error("==> {} events failed.", failedEvents.size()); } for (Iterator<ExecutionCommand> iterator = commandsToUpdate.iterator(); iterator.hasNext(); ) { ExecutionCommand cmd = iterator.next(); for (ServiceComponentHostEvent event : failedEvents.keySet()) { if (StringUtils.equals(event.getHostName(), cmd.getHostname()) && StringUtils.equals(event.getServiceComponentName(), cmd.getRole())) { iterator.remove(); commandsToAbort.put(cmd, failedEvents.get(event)); break; } } } } else { LOG.warn("There was events to process but cluster {} not found", stage.getClusterName()); } } LOG.debug("==> Scheduling {} tasks...", commandsToUpdate.size()); db.bulkHostRoleScheduled(stage, commandsToUpdate); if (commandsToAbort.size() > 0) { // Code branch may be a bit slow, but is extremely rarely used LOG.debug("==> Aborting {} tasks...", commandsToAbort.size()); // Build a list of HostRoleCommands List<Long> taskIds = new ArrayList<>(); for (ExecutionCommand command : commandsToAbort.keySet()) { taskIds.add(command.getTaskId()); } Collection<HostRoleCommand> hostRoleCommands = db.getTasks(taskIds); cancelHostRoleCommands(hostRoleCommands, FAILED_TASK_ABORT_REASONING); db.bulkAbortHostRole(stage, commandsToAbort); } LOG.debug("==> Adding {} tasks to queue...", commandsToUpdate.size()); for (ExecutionCommand cmd : commandsToUpdate) { // Do not queue up server actions; however if we encounter one, wake up the ServerActionExecutor if (Role.AMBARI_SERVER_ACTION.name().equals(cmd.getRole())) { serverActionExecutor.awake(); } else { commandsToEnqueue.put(clusters.getHost(cmd.getHostname()).getHostId(), cmd); } } if (!commandsToEnqueue.isEmpty()) { agentCommandsPublisher.sendAgentCommand(commandsToEnqueue); } LOG.debug("==> Finished."); if (!configuration.getParallelStageExecution()) { // If disabled return; } if (exclusiveRequestIsGoing) { // As a result, we will prevent any further stages from being executed LOG.debug("Stage requires exclusive execution, skipping all executing any further stages"); break; } } requestsInProgress.retainAll(runningRequestIds); } finally { LOG.debug("Scheduler finished work."); unitOfWork.end(); } }
223551095_1928
protected boolean timeOutActionNeeded(HostRoleStatus status, Stage stage, Host host, String role, long currentTime, long taskTimeout) throws AmbariException { if (( !status.equals(HostRoleStatus.QUEUED) ) && ( ! status.equals(HostRoleStatus.IN_PROGRESS) )) { return false; } // tasks are held in a variety of in-memory maps that require a hostname key // host being null is ok - that means it's a server-side task String hostName = (null == host) ? null : host.getHostName(); // If we have other command in progress for this stage do not timeout this one if (hasCommandInProgress(stage, hostName) && !status.equals(HostRoleStatus.IN_PROGRESS)) { return false; } if (currentTime >= stage.getLastAttemptTime(hostName, role) + taskTimeout) { return true; } return false; }
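Once the status and in-progress guards above pass, the timeout test is plain arithmetic: a task is timed out when currentTime >= lastAttemptTime + taskTimeout. A minimal sketch (hypothetical standalone form of the final check):

public class TaskTimeoutSketch {

    // Mirrors the final comparison in timeOutActionNeeded.
    static boolean timedOut(long currentTime, long lastAttemptTime, long taskTimeout) {
        return currentTime >= lastAttemptTime + taskTimeout;
    }

    public static void main(String[] args) {
        System.out.println(timedOut(10_000, 4_000, 5_000)); // true: the 9_000 deadline has passed
        System.out.println(timedOut(8_000, 4_000, 5_000));  // false: the 9_000 deadline is not reached
    }
}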
223551095_1929
public void doWork() throws AmbariException { try { unitOfWork.begin(); // grab a reference to this UnitOfWork's EM threadEntityManager = entityManagerProvider.get(); // The first thing to do is to abort requests that are cancelled processCancelledRequestsList(); // !!! getting the stages in progress could be a very expensive call due // to the join being used; there's no need to make it if there are // no commands in progress if (db.getCommandsInProgressCount() == 0) { // Nothing to do if (LOG.isDebugEnabled()) { LOG.debug("There are no stages currently in progress."); } return; } Set<Long> runningRequestIds = new HashSet<>(); List<Stage> firstStageInProgressPerRequest = db.getFirstStageInProgressPerRequest(); if (LOG.isDebugEnabled()) { LOG.debug("Scheduler wakes up"); LOG.debug("Processing {} in progress stages", firstStageInProgressPerRequest.size()); } publishInProgressTasks(firstStageInProgressPerRequest); if (firstStageInProgressPerRequest.isEmpty()) { // Nothing to do if (LOG.isDebugEnabled()) { LOG.debug("There are no stages currently in progress."); } return; } int i_stage = 0; // get the range of requests in progress long iLowestRequestIdInProgress = firstStageInProgressPerRequest.get(0).getRequestId(); long iHighestRequestIdInProgress = firstStageInProgressPerRequest.get( firstStageInProgressPerRequest.size() - 1).getRequestId(); List<String> hostsWithPendingTasks = hostRoleCommandDAO.getHostsWithPendingTasks( iLowestRequestIdInProgress, iHighestRequestIdInProgress); // filter the stages in progress down to those which can be scheduled in // parallel List<Stage> stages = filterParallelPerHostStages(firstStageInProgressPerRequest); boolean exclusiveRequestIsGoing = false; // This loop greatly depends on the fact that order of stages in // a list does not change between invocations for (Stage stage : stages) { // Check if we can process this stage in parallel with another stages i_stage++; long requestId = stage.getRequestId(); LOG.debug("==> STAGE_i = {}(requestId={},StageId={})", i_stage, requestId, stage.getStageId()); RequestEntity request = db.getRequestEntity(requestId); if (request.isExclusive()) { if (runningRequestIds.size() > 0) { // As a result, we will wait until any previous stages are finished LOG.debug("Stage requires exclusive execution, but other requests are already executing. Stopping for now"); break; } exclusiveRequestIsGoing = true; } if (runningRequestIds.contains(requestId)) { // We don't want to process different stages from the same request in parallel LOG.debug("==> We don't want to process different stages from the same request in parallel"); continue; } else { runningRequestIds.add(requestId); if (!requestsInProgress.contains(requestId)) { requestsInProgress.add(requestId); db.startRequest(requestId); } } // Commands that will be scheduled in current scheduler wakeup List<ExecutionCommand> commandsToSchedule = new ArrayList<>(); Multimap<Long, AgentCommand> commandsToEnqueue = ArrayListMultimap.create(); Map<String, RoleStats> roleStats = processInProgressStage(stage, commandsToSchedule, commandsToEnqueue); // Check if stage is failed boolean failed = false; for (Map.Entry<String, RoleStats> entry : roleStats.entrySet()) { String role = entry.getKey(); RoleStats stats = entry.getValue(); if (LOG.isDebugEnabled()) { LOG.debug("Stats for role: {}, stats={}", role, stats); } // only fail the request if the role failed and the stage is not // skippable if (stats.isRoleFailed() && !stage.isSkippable()) { LOG.warn("{} failed, request {} will be aborted", role, request.getRequestId()); failed = true; break; } } if (!failed) { // Prior stage may have failed and it may need to fail the whole request failed = hasPreviousStageFailed(stage); } if (failed) { LOG.error("Operation completely failed, aborting request id: {}", stage.getRequestId()); cancelHostRoleCommands(stage.getOrderedHostRoleCommands(), FAILED_TASK_ABORT_REASONING); abortOperationsForStage(stage); return; } List<ExecutionCommand> commandsToStart = new ArrayList<>(); List<ExecutionCommand> commandsToUpdate = new ArrayList<>(); //Schedule what we have so far for (ExecutionCommand cmd : commandsToSchedule) { ConfigHelper.processHiddenAttribute(cmd.getConfigurations(), cmd.getConfigurationAttributes(), cmd.getRole(), false); processHostRole(request, stage, cmd, commandsToStart, commandsToUpdate); } LOG.debug("==> Commands to start: {}", commandsToStart.size()); LOG.debug("==> Commands to update: {}", commandsToUpdate.size()); //Multimap is analog of Map<Object, List<Object>> but allows to avoid nested loop ListMultimap<String, ServiceComponentHostEvent> eventMap = formEventMap(stage, commandsToStart); Map<ExecutionCommand, String> commandsToAbort = new HashMap<>(); if (!eventMap.isEmpty()) { LOG.debug("==> processing {} serviceComponentHostEvents...", eventMap.size()); Cluster cluster = clusters.getCluster(stage.getClusterName()); if (cluster != null) { Map<ServiceComponentHostEvent, String> failedEvents = cluster.processServiceComponentHostEvents(eventMap); if (failedEvents.size() > 0) { LOG.error("==> {} events failed.", failedEvents.size()); } for (Iterator<ExecutionCommand> iterator = commandsToUpdate.iterator(); iterator.hasNext(); ) { ExecutionCommand cmd = iterator.next(); for (ServiceComponentHostEvent event : failedEvents.keySet()) { if (StringUtils.equals(event.getHostName(), cmd.getHostname()) && StringUtils.equals(event.getServiceComponentName(), cmd.getRole())) { iterator.remove(); commandsToAbort.put(cmd, failedEvents.get(event)); break; } } } } else { LOG.warn("There was events to process but cluster {} not found", stage.getClusterName()); } } LOG.debug("==> Scheduling {} tasks...", commandsToUpdate.size()); db.bulkHostRoleScheduled(stage, commandsToUpdate); if (commandsToAbort.size() > 0) { // Code branch may be a bit slow, but is extremely rarely used LOG.debug("==> Aborting {} tasks...", commandsToAbort.size()); // Build a list of HostRoleCommands List<Long> taskIds = new ArrayList<>(); for (ExecutionCommand command : commandsToAbort.keySet()) { taskIds.add(command.getTaskId()); } Collection<HostRoleCommand> hostRoleCommands = db.getTasks(taskIds); cancelHostRoleCommands(hostRoleCommands, FAILED_TASK_ABORT_REASONING); db.bulkAbortHostRole(stage, commandsToAbort); } LOG.debug("==> Adding {} tasks to queue...", commandsToUpdate.size()); for (ExecutionCommand cmd : commandsToUpdate) { // Do not queue up server actions; however if we encounter one, wake up the ServerActionExecutor if (Role.AMBARI_SERVER_ACTION.name().equals(cmd.getRole())) { serverActionExecutor.awake(); } else { commandsToEnqueue.put(clusters.getHost(cmd.getHostname()).getHostId(), cmd); } } if (!commandsToEnqueue.isEmpty()) { agentCommandsPublisher.sendAgentCommand(commandsToEnqueue); } LOG.debug("==> Finished."); if (!configuration.getParallelStageExecution()) { // If disabled return; } if (exclusiveRequestIsGoing) { // As a result, we will prevent any further stages from being executed LOG.debug("Stage requires exclusive execution, skipping all executing any further stages"); break; } } requestsInProgress.retainAll(runningRequestIds); } finally { LOG.debug("Scheduler finished work."); unitOfWork.end(); } }
223551095_1930
public void doWork() throws AmbariException { try { unitOfWork.begin(); // grab a reference to this UnitOfWork's EM threadEntityManager = entityManagerProvider.get(); // The first thing to do is to abort requests that are cancelled processCancelledRequestsList(); // !!! getting the stages in progress could be a very expensive call due // to the join being used; there's no need to make it if there are // no commands in progress if (db.getCommandsInProgressCount() == 0) { // Nothing to do if (LOG.isDebugEnabled()) { LOG.debug("There are no stages currently in progress."); } return; } Set<Long> runningRequestIds = new HashSet<>(); List<Stage> firstStageInProgressPerRequest = db.getFirstStageInProgressPerRequest(); if (LOG.isDebugEnabled()) { LOG.debug("Scheduler wakes up"); LOG.debug("Processing {} in progress stages", firstStageInProgressPerRequest.size()); } publishInProgressTasks(firstStageInProgressPerRequest); if (firstStageInProgressPerRequest.isEmpty()) { // Nothing to do if (LOG.isDebugEnabled()) { LOG.debug("There are no stages currently in progress."); } return; } int i_stage = 0; // get the range of requests in progress long iLowestRequestIdInProgress = firstStageInProgressPerRequest.get(0).getRequestId(); long iHighestRequestIdInProgress = firstStageInProgressPerRequest.get( firstStageInProgressPerRequest.size() - 1).getRequestId(); List<String> hostsWithPendingTasks = hostRoleCommandDAO.getHostsWithPendingTasks( iLowestRequestIdInProgress, iHighestRequestIdInProgress); // filter the stages in progress down to those which can be scheduled in // parallel List<Stage> stages = filterParallelPerHostStages(firstStageInProgressPerRequest); boolean exclusiveRequestIsGoing = false; // This loop greatly depends on the fact that order of stages in // a list does not change between invocations for (Stage stage : stages) { // Check if we can process this stage in parallel with another stages i_stage++; long requestId = stage.getRequestId(); LOG.debug("==> STAGE_i = {}(requestId={},StageId={})", i_stage, requestId, stage.getStageId()); RequestEntity request = db.getRequestEntity(requestId); if (request.isExclusive()) { if (runningRequestIds.size() > 0) { // As a result, we will wait until any previous stages are finished LOG.debug("Stage requires exclusive execution, but other requests are already executing. Stopping for now"); break; } exclusiveRequestIsGoing = true; } if (runningRequestIds.contains(requestId)) { // We don't want to process different stages from the same request in parallel LOG.debug("==> We don't want to process different stages from the same request in parallel"); continue; } else { runningRequestIds.add(requestId); if (!requestsInProgress.contains(requestId)) { requestsInProgress.add(requestId); db.startRequest(requestId); } } // Commands that will be scheduled in current scheduler wakeup List<ExecutionCommand> commandsToSchedule = new ArrayList<>(); Multimap<Long, AgentCommand> commandsToEnqueue = ArrayListMultimap.create(); Map<String, RoleStats> roleStats = processInProgressStage(stage, commandsToSchedule, commandsToEnqueue); // Check if stage is failed boolean failed = false; for (Map.Entry<String, RoleStats> entry : roleStats.entrySet()) { String role = entry.getKey(); RoleStats stats = entry.getValue(); if (LOG.isDebugEnabled()) { LOG.debug("Stats for role: {}, stats={}", role, stats); } // only fail the request if the role failed and the stage is not // skippable if (stats.isRoleFailed() && !stage.isSkippable()) { LOG.warn("{} failed, request {} will be aborted", role, request.getRequestId()); failed = true; break; } } if (!failed) { // Prior stage may have failed and it may need to fail the whole request failed = hasPreviousStageFailed(stage); } if (failed) { LOG.error("Operation completely failed, aborting request id: {}", stage.getRequestId()); cancelHostRoleCommands(stage.getOrderedHostRoleCommands(), FAILED_TASK_ABORT_REASONING); abortOperationsForStage(stage); return; } List<ExecutionCommand> commandsToStart = new ArrayList<>(); List<ExecutionCommand> commandsToUpdate = new ArrayList<>(); //Schedule what we have so far for (ExecutionCommand cmd : commandsToSchedule) { ConfigHelper.processHiddenAttribute(cmd.getConfigurations(), cmd.getConfigurationAttributes(), cmd.getRole(), false); processHostRole(request, stage, cmd, commandsToStart, commandsToUpdate); } LOG.debug("==> Commands to start: {}", commandsToStart.size()); LOG.debug("==> Commands to update: {}", commandsToUpdate.size()); //Multimap is analog of Map<Object, List<Object>> but allows to avoid nested loop ListMultimap<String, ServiceComponentHostEvent> eventMap = formEventMap(stage, commandsToStart); Map<ExecutionCommand, String> commandsToAbort = new HashMap<>(); if (!eventMap.isEmpty()) { LOG.debug("==> processing {} serviceComponentHostEvents...", eventMap.size()); Cluster cluster = clusters.getCluster(stage.getClusterName()); if (cluster != null) { Map<ServiceComponentHostEvent, String> failedEvents = cluster.processServiceComponentHostEvents(eventMap); if (failedEvents.size() > 0) { LOG.error("==> {} events failed.", failedEvents.size()); } for (Iterator<ExecutionCommand> iterator = commandsToUpdate.iterator(); iterator.hasNext(); ) { ExecutionCommand cmd = iterator.next(); for (ServiceComponentHostEvent event : failedEvents.keySet()) { if (StringUtils.equals(event.getHostName(), cmd.getHostname()) && StringUtils.equals(event.getServiceComponentName(), cmd.getRole())) { iterator.remove(); commandsToAbort.put(cmd, failedEvents.get(event)); break; } } } } else { LOG.warn("There was events to process but cluster {} not found", stage.getClusterName()); } } LOG.debug("==> Scheduling {} tasks...", commandsToUpdate.size()); db.bulkHostRoleScheduled(stage, commandsToUpdate); if (commandsToAbort.size() > 0) { // Code branch may be a bit slow, but is extremely rarely used LOG.debug("==> Aborting {} tasks...", commandsToAbort.size()); // Build a list of HostRoleCommands List<Long> taskIds = new ArrayList<>(); for (ExecutionCommand command : commandsToAbort.keySet()) { taskIds.add(command.getTaskId()); } Collection<HostRoleCommand> hostRoleCommands = db.getTasks(taskIds); cancelHostRoleCommands(hostRoleCommands, FAILED_TASK_ABORT_REASONING); db.bulkAbortHostRole(stage, commandsToAbort); } LOG.debug("==> Adding {} tasks to queue...", commandsToUpdate.size()); for (ExecutionCommand cmd : commandsToUpdate) { // Do not queue up server actions; however if we encounter one, wake up the ServerActionExecutor if (Role.AMBARI_SERVER_ACTION.name().equals(cmd.getRole())) { serverActionExecutor.awake(); } else { commandsToEnqueue.put(clusters.getHost(cmd.getHostname()).getHostId(), cmd); } } if (!commandsToEnqueue.isEmpty()) { agentCommandsPublisher.sendAgentCommand(commandsToEnqueue); } LOG.debug("==> Finished."); if (!configuration.getParallelStageExecution()) { // If disabled return; } if (exclusiveRequestIsGoing) { // As a result, we will prevent any further stages from being executed LOG.debug("Stage requires exclusive execution, skipping all executing any further stages"); break; } } requestsInProgress.retainAll(runningRequestIds); } finally { LOG.debug("Scheduler finished work."); unitOfWork.end(); } }
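Among the branches in doWork above, the exclusivity gate is easy to miss: an exclusive request starts only when no other request is already running in the pass, and once it starts, no later stage is scheduled in the same pass. A self-contained sketch of that gate alone (hypothetical types; the real loop also deduplicates stages per request and handles failures):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ExclusiveGateSketch {
    static class StageRef {
        final long requestId; final boolean exclusive;
        StageRef(long requestId, boolean exclusive) { this.requestId = requestId; this.exclusive = exclusive; }
    }

    // Returns the request ids that would be scheduled in one scheduler pass.
    static List<Long> schedulePass(List<StageRef> stages) {
        Set<Long> running = new HashSet<>();
        List<Long> scheduled = new ArrayList<>();
        for (StageRef stage : stages) {
            if (stage.exclusive && !running.isEmpty()) {
                break; // wait until everything already running finishes
            }
            if (running.add(stage.requestId)) {
                scheduled.add(stage.requestId);
            }
            if (stage.exclusive) {
                break; // nothing may run alongside an exclusive request
            }
        }
        return scheduled;
    }

    public static void main(String[] args) {
        List<StageRef> stages = new ArrayList<>();
        stages.add(new StageRef(1, false));
        stages.add(new StageRef(2, true)); // exclusive: blocked while request 1 runs
        stages.add(new StageRef(3, false));
        System.out.println(schedulePass(stages)); // [1]
    }
}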
223551095_1931
public void setTaskTimeoutAdjustment(boolean val) { taskTimeoutAdjustment = val; }
223551095_1932
void cancelHostRoleCommands(Collection<HostRoleCommand> hostRoleCommands, String reason) throws AmbariException { for (HostRoleCommand hostRoleCommand : hostRoleCommands) { // There are no server actions in actionQueue if (!Role.AMBARI_SERVER_ACTION.equals(hostRoleCommand.getRole())) { if (hostRoleCommand.getStatus() == HostRoleStatus.QUEUED || hostRoleCommand.getStatus() == HostRoleStatus.IN_PROGRESS) { CancelCommand cancelCommand = new CancelCommand(); cancelCommand.setTargetTaskId(hostRoleCommand.getTaskId()); cancelCommand.setReason(reason); agentCommandsPublisher.sendAgentCommand(hostRoleCommand.getHostId(), cancelCommand); } } if (hostRoleCommand.getStatus().isHoldingState()) { db.abortHostRole(hostRoleCommand.getHostName(), hostRoleCommand.getRequestId(), hostRoleCommand.getStageId(), hostRoleCommand.getRole().name()); } // If host role is an Action, we have to send an event if (hostRoleCommand.getRoleCommand().equals(RoleCommand.ACTIONEXECUTE)) { String clusterName = hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getClusterName(); processActionDeath(clusterName, hostRoleCommand.getHostName(), hostRoleCommand.getRole().name()); } } }
223551095_1933
void cancelHostRoleCommands(Collection<HostRoleCommand> hostRoleCommands, String reason) throws AmbariException { for (HostRoleCommand hostRoleCommand : hostRoleCommands) { // There are no server actions in actionQueue if (!Role.AMBARI_SERVER_ACTION.equals(hostRoleCommand.getRole())) { if (hostRoleCommand.getStatus() == HostRoleStatus.QUEUED || hostRoleCommand.getStatus() == HostRoleStatus.IN_PROGRESS) { CancelCommand cancelCommand = new CancelCommand(); cancelCommand.setTargetTaskId(hostRoleCommand.getTaskId()); cancelCommand.setReason(reason); agentCommandsPublisher.sendAgentCommand(hostRoleCommand.getHostId(), cancelCommand); } } if (hostRoleCommand.getStatus().isHoldingState()) { db.abortHostRole(hostRoleCommand.getHostName(), hostRoleCommand.getRequestId(), hostRoleCommand.getStageId(), hostRoleCommand.getRole().name()); } // If host role is an Action, we have to send an event if (hostRoleCommand.getRoleCommand().equals(RoleCommand.ACTIONEXECUTE)) { String clusterName = hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getClusterName(); processActionDeath(clusterName, hostRoleCommand.getHostName(), hostRoleCommand.getRole().name()); } } }
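A compact sketch of the cancellation decision above (simplified, hypothetical types): an agent-side CancelCommand is sent only for non-server-action tasks currently QUEUED or IN_PROGRESS; tasks in a holding state are instead aborted through the database, and ACTIONEXECUTE commands additionally trigger an action-death event:

public class CancelDecisionSketch {
    enum Status { PENDING, QUEUED, IN_PROGRESS, HOLDING, COMPLETED }

    // True when an agent-side CancelCommand should be sent for this task;
    // holding-state tasks take the database-abort path instead.
    static boolean shouldSendCancel(boolean isServerAction, Status status) {
        return !isServerAction && (status == Status.QUEUED || status == Status.IN_PROGRESS);
    }

    public static void main(String[] args) {
        System.out.println(shouldSendCancel(false, Status.IN_PROGRESS)); // true
        System.out.println(shouldSendCancel(true, Status.IN_PROGRESS));  // false: server actions are not queued to agents
        System.out.println(shouldSendCancel(false, Status.PENDING));     // false
    }
}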