/**
 * Builds and (unless planOnly) deploys a database connection pool as a JSR-88
 * connector module. When {@code data.abstractName} is empty a brand-new pool is
 * created from the RAR identified by {@code data.getRarPath()}; otherwise the
 * method falls into the update path (not fully visible in this chunk).
 *
 * @param request  the portlet request, used to locate the deployment manager,
 *                 the RAR file, and any in-progress import status
 * @param response used to set render parameters steering the next view
 *                 (import-status view on success, edit view on failure)
 * @param data     the user-entered pool settings (name, driver, URL, sizes, ...)
 * @param planOnly if true, only generate and return the deployment plan XML;
 *                 nothing is distributed or started
 * @return the deployment plan as a String when planOnly is true; otherwise the
 *         return value comes from code past the end of this chunk
 */
private static String save(PortletRequest request, ActionResponse response, PoolData data, boolean planOnly) {
ImportStatus status = getImportStatus(request);
// An empty abstractName means no existing deployment to update: create a new pool.
if (data.abstractName == null || data.abstractName.equals("")) { // we're creating a new pool
// Strip all whitespace from the pool name; it becomes part of the artifactId below.
data.name = data.name.replaceAll("\\s", "");
DeploymentManager mgr = ManagementHelper.getManagementHelper(request).getDeploymentManager();
try {
File rarFile = getRAR(request, data.getRarPath());
// NOTE(review): File.toURL() is deprecated (does not escape illegal URL
// characters); rarFile.toURI().toURL() is the recommended replacement.
ConnectorDeployable deployable = new ConnectorDeployable(rarFile.toURL());
DeploymentConfiguration config = mgr.createConfiguration(deployable);
final DDBeanRoot ddBeanRoot = deployable.getDDBeanRoot();
// Navigate the JSR-88 DConfigBean tree down to the connector element.
Connector15DCBRoot root = (Connector15DCBRoot) config.getDConfigBeanRoot(ddBeanRoot);
ConnectorDCB connector = (ConnectorDCB) root.getDConfigBean(
ddBeanRoot.getChildBean(root.getXpaths()[0])[0]);
// Build the Geronimo environment: configId identifies this deployment.
EnvironmentData environment = new EnvironmentData();
connector.setEnvironment(environment);
org.apache.geronimo.deployment.service.jsr88.Artifact configId = new org.apache.geronimo.deployment.service.jsr88.Artifact();
environment.setConfigId(configId);
configId.setGroupId("console.dbpool");
String artifactId = data.name;
if (artifactId.indexOf('/') != -1) {
// slash in artifact-id results in invalid configuration-id and leads to deployment errors
artifactId = artifactId.replaceAll("/", "%2F");
}
configId.setArtifactId(artifactId);
configId.setVersion("1.0");
configId.setType("rar");
// Declare each driver JAR as a dependency. A trailing empty entry in the
// jars array is dropped.
// NOTE(review): this indexes jars[jars.length - 1] — throws if getJars()
// returns an empty array or null; presumably callers guarantee at least
// one entry. TODO confirm against PoolData.getJars().
String[] jars = data.getJars();
int length = jars[jars.length - 1].length() == 0 ? jars.length - 1 : jars.length;
org.apache.geronimo.deployment.service.jsr88.Artifact[] dependencies = new org.apache.geronimo.deployment.service.jsr88.Artifact[length];
for (int i = 0; i < dependencies.length; i++) {
dependencies[i] = new org.apache.geronimo.deployment.service.jsr88.Artifact();
}
environment.setDependencies(dependencies);
for (int i = 0; i < dependencies.length; i++) {
// Each jar entry is a groupId/artifactId/version/type coordinate string.
Artifact tmp = Artifact.create(jars[i]);
dependencies[i].setGroupId(tmp.getGroupId());
dependencies[i].setArtifactId(tmp.getArtifactId());
dependencies[i].setVersion(tmp.getVersion().toString());
dependencies[i].setType(tmp.getType());
}
// Configure a single DataSource connection definition on the first
// resource adapter in the RAR.
ResourceAdapter adapter = connector.getResourceAdapter()[0];
ConnectionDefinition definition = new ConnectionDefinition();
adapter.setConnectionDefinition(new ConnectionDefinition[]{definition});
definition.setConnectionFactoryInterface("javax.sql.DataSource");
ConnectionDefinitionInstance instance = new ConnectionDefinitionInstance();
definition.setConnectionInstance(new ConnectionDefinitionInstance[]{instance});
instance.setName(data.getName());
ConfigPropertySetting[] settings = instance.getConfigPropertySetting();
if (data.isGeneric()) { // it's a generic TranQL JDBC pool
// Generic pools expose fixed, well-known property names.
for (ConfigPropertySetting setting : settings) {
if (setting.getName().equals("UserName")) {
setting.setValue(data.user);
} else if (setting.getName().equals("Password")) {
setting.setValue(data.password);
} else if (setting.getName().equals("ConnectionURL")) {
setting.setValue(data.url);
} else if (setting.getName().equals("Driver")) {
setting.setValue(data.driverClass);
}
}
} else { // it's an XA driver or non-TranQL RA
// Adapter-specific properties arrive keyed as "property-<name>";
// missing entries are written as empty strings rather than left null.
for (ConfigPropertySetting setting : settings) {
String value = data.properties.get("property-" + setting.getName());
setting.setValue(value == null ? "" : value);
}
}
ConnectionManager manager = instance.getConnectionManager();
manager.setTransactionLocal(true);
SinglePool pool = new SinglePool();
manager.setPoolSingle(pool);
pool.setMatchOne(true);
// Max Size needs to be set before the minimum. This is because
// the connection manager will constrain the minimum based on the
// current maximum value in the pool. We might consider adding a
// setPoolConstraints method to allow specifying both at the same time.
// NOTE(review): new Integer(...) is deprecated; Integer.valueOf(...) is
// the modern equivalent. NumberFormatException on bad input propagates
// to the catch (Exception) below.
if (data.maxSize != null && !data.maxSize.equals("")) {
pool.setMaxSize(new Integer(data.maxSize));
}
if (data.minSize != null && !data.minSize.equals("")) {
pool.setMinSize(new Integer(data.minSize));
}
if (data.blockingTimeout != null && !data.blockingTimeout.equals("")) {
pool.setBlockingTimeoutMillis(new Integer(data.blockingTimeout));
}
if (data.idleTimeout != null && !data.idleTimeout.equals("")) {
pool.setIdleTimeoutMinutes(new Integer(data.idleTimeout));
}
if (planOnly) {
// Plan-only mode: serialize the deployment plan to a string and stop.
// NOTE(review): out is not closed if config.save(out) throws (no
// try-with-resources / finally); harmless for a ByteArrayOutputStream
// but the FileOutputStream below has the same pattern and does leak.
ByteArrayOutputStream out = new ByteArrayOutputStream();
config.save(out);
out.close();
return new String(out.toByteArray(), "US-ASCII");
} else {
// Write the plan to a temp file, then distribute and start the module.
File tempFile = File.createTempFile("console-deployment", ".xml");
tempFile.deleteOnExit();
log.debug("Writing database pool deployment plan to " + tempFile.getAbsolutePath());
// NOTE(review): out leaks if config.save/flush throws before close();
// a try-with-resources (or finally) would be safer here.
BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile));
config.save(out);
out.flush();
out.close();
Target[] targets = mgr.getTargets();
if (null == targets) {
throw new IllegalStateException("No target to distribute to");
}
// Deploy to the first target only.
targets = new Target[] {targets[0]};
ProgressObject po = mgr.distribute(targets, rarFile, tempFile);
waitForProgress(po);
if (po.getDeploymentStatus().isCompleted()) {
// Distribution succeeded; now start the freshly distributed module.
TargetModuleID[] ids = po.getResultTargetModuleIDs();
po = mgr.start(ids);
waitForProgress(po);
if (po.getDeploymentStatus().isCompleted()) {
ids = po.getResultTargetModuleIDs();
// If this save is part of a bulk import, record progress and
// switch the next render to the import-status view.
if (status != null) {
status.getCurrentPool().setName(data.getName());
status.getCurrentPool().setConfigurationName(ids[0].getModuleID());
status.getCurrentPool().setFinished(true);
response.setRenderParameter(MODE_KEY, IMPORT_STATUS_MODE);
}
log.info("Deployment completed successfully!");
}
} else if (po.getDeploymentStatus().isFailed()) {
// Surface the failure to the edit view.
// NOTE(review): the distribute-succeeded/start-failed case falls
// through silently (no error recorded) — confirm that is intended.
data.deployError = "Unable to deploy: " + data.name;
response.setRenderParameter(MODE_KEY, EDIT_MODE);
log.info("Deployment Failed!");
}
}
} catch (Exception e) {
// NOTE(review): broad catch — any failure (parse errors, bad numbers,
// I/O) is only logged; execution continues past the try with no error
// shown to the user on this path. Confirm this best-effort handling
// is deliberate.
log.error("Unable to save connection pool", e);
} finally {
if (mgr != null) mgr.release();
}
} else { // We're saving updates to an existing pool
if (planOnly) {
throw new UnsupportedOperationException("Can't update a plan for an existing deployment");
}