
Examples of org.apache.openjpa.jdbc.schema.Table
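The snippets below are taken from the OpenJPA JDBC mapping and schema code and show Table being driven by mapping metadata. For orientation, here is a minimal, self-contained sketch that exercises the same API by hand. The table, column, and constraint names are invented for illustration, and Table.addColumn(String) is assumed to be available alongside the addPrimaryKey/addIndex/addUnique calls that appear in the snippets.

    import org.apache.openjpa.jdbc.schema.Column;
    import org.apache.openjpa.jdbc.schema.Index;
    import org.apache.openjpa.jdbc.schema.PrimaryKey;
    import org.apache.openjpa.jdbc.schema.Table;
    import org.apache.openjpa.jdbc.schema.Unique;
    import org.apache.openjpa.meta.JavaTypes;

    public class TableSketch {

        public static Table buildExampleTable() {
            // Stand-alone table definition; all names here are hypothetical.
            Table table = new Table();
            table.setName("EXAMPLE_TABLE");

            // A long, non-null identity column, mirroring the datastore-identity
            // column set up in the first snippet below.
            Column id = table.addColumn("ID");
            id.setJavaType(JavaTypes.LONG);
            id.setNotNull(true);

            // Primary key over the identity column.
            PrimaryKey pk = table.addPrimaryKey("PK_EXAMPLE");
            pk.setColumns(new Column[]{ id });

            // An index and a unique constraint on the same column, using the
            // same calls the createIndex/createUnique snippets rely on.
            Index idx = table.addIndex("IDX_EXAMPLE_ID");
            idx.setUnique(false);
            idx.setColumns(new Column[]{ id });

            Unique unq = table.addUnique("UNQ_EXAMPLE_ID");
            unq.setColumns(new Column[]{ id });

            return table;
        }
    }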


        info.assertNoForeignKey(cls, !adapt);
        info.assertNoIndex(cls, false);
        info.assertNoUnique(cls, false);

        // find class table
        Table table = info.getTable(cls, adapt);

        // find primary key column
        Column[] pkCols = null;
        if (cls.getIdentityType() == cls.ID_DATASTORE) {
            Column id = new Column();
            id.setName("id");
            id.setJavaType(JavaTypes.LONG);
            if (cls.getIdentityStrategy() == ValueStrategies.AUTOASSIGN)
                id.setAutoAssigned(true);
            id.setNotNull(true);
            pkCols = info.getDataStoreIdColumns(cls, new Column[]{ id },
                table, adapt);
            cls.setPrimaryKeyColumns(pkCols);
            cls.setColumnIO(info.getColumnIO());
        }
        cls.setTable(table);

        // add a primary key if we don't have one already
        PrimaryKey pk = table.getPrimaryKey();
        if (pk == null) {
            String pkname = null;
            if (adapt)
                pkname = cls.getMappingRepository().getMappingDefaults().
                    getPrimaryKeyName(cls, table);
            pk = table.addPrimaryKey(pkname);
            pk.setLogical(!adapt);
            if (pkCols != null)
                pk.setColumns(pkCols);
        }


            given = given.substring(dotIdx + 1);
        }

        // look for named table using full name and findTable, which allows
        // the dynamic schema factory to create the table if needed
        Table table = group.findTable(fullName);
        if (table != null)
            return table;
        if (!adapt)
            throw new MetaDataException(_loc.get("bad-table", given, context));
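The lookup above ultimately relies on SchemaGroup.findTable, which resolves a possibly schema-qualified name and, when a dynamic schema factory is in use, can create the table on demand. A minimal sketch of calling it directly, assuming an already-populated org.apache.openjpa.jdbc.schema.SchemaGroup and a hypothetical table name:

    import org.apache.openjpa.jdbc.schema.SchemaGroup;
    import org.apache.openjpa.jdbc.schema.Table;

    public class FindTableSketch {

        // Resolve a table by its (optionally schema-qualified) name.
        public static Table lookup(SchemaGroup group, String fullName) {
            // e.g. fullName = "MYSCHEMA.ORDERS" (hypothetical)
            Table table = group.findTable(fullName);
            if (table == null)
                throw new IllegalArgumentException("No such table: " + fullName);
            return table;
        }
    }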

                    + "-no-index-cols", context));
            return null;
        }

        // look for an existing index on these columns
        Table table = cols[0].getTable();
        Index[] idxs = table.getIndexes();
        Index exist = null;
        for (int i = 0; i < idxs.length; i++) {
            if (idxs[i].columnsMatch(cols)) {
                exist = idxs[i];
                break;
            }
        }

        // remove existing index?
        if (!_canIdx) {
            if (exist == null)
                return null;
            if (!adapt)
                throw new MetaDataException(_loc.get(prefix + "-index-exists",
                    context));
            table.removeIndex(exist);
            return null;
        }

        // if we have an existing index, merge given info into it
        if (exist != null) {
            if (_idx != null && _idx.isUnique() && !exist.isUnique()) {
                if (!adapt)
                    throw new MetaDataException(_loc.get(prefix
                        + "-index-not-unique", context));
                exist.setUnique(true);
            }
            return exist;
        }

        // if no defaults return null
        MappingRepository repos = (MappingRepository) context.getRepository();
        boolean fill = repos.getMappingDefaults().defaultMissingInfo();
        if (_idx == null && (tmplate == null || (!adapt && !fill)))
            return null;

        String name = null;
        boolean unq;
        if (_idx != null) {
            name = _idx.getName();
            unq = _idx.isUnique();
        } else
            unq = tmplate.isUnique();

        // if no name provided by user info, make one
        if (name == null) {
            if (tmplate != null)
                name = tmplate.getName();
            else {
                name = cols[0].getName();
                name = repos.getDBDictionary().getValidIndexName(name, table);
            }
        }

        Index idx = table.addIndex(name);
        idx.setUnique(unq);
        idx.setColumns(cols);
        return idx;
    }

                    + "-no-unique-cols", context));
            return null;
        }

        // look for an existing constraint on these columns
        Table table = cols[0].getTable();
        Unique[] unqs = table.getUniques();
        Unique exist = null;
        for (int i = 0; i < unqs.length; i++) {
            if (unqs[i].columnsMatch(cols)) {
                exist = unqs[i];
                break;
            }
        }

        // remove existing unique?
        if (!_canUnq) {
            if (exist == null)
                return null;
            if (!adapt)
                throw new MetaDataException(_loc.get(prefix
                    + "-unique-exists", context));
            table.removeUnique(exist);
            return null;
        }

        // no defaults; return existing constraint (if any)
        if (tmplate == null && _unq == null)
            return exist;

        MappingRepository repos = (MappingRepository) context.getRepository();
        if (exist != null) {
            if (_unq != null && _unq.isDeferred() && !exist.isDeferred()) {
                Log log = repos.getLog();
                if (log.isWarnEnabled())
                    log.warn(_loc.get(prefix + "-defer-unique", context));
            }
            return exist;
        }

        // dict can't handle unique constraints?
        DBDictionary dict = repos.getDBDictionary();
        if (_unq != null && !dict.supportsUniqueConstraints) {
            Log log = repos.getLog();
            if (log.isWarnEnabled())
                log.warn(_loc.get(prefix + "-unique-support", context));
            return null;
        }

        boolean fill = repos.getMappingDefaults().defaultMissingInfo();
        if (!adapt && !fill && _unq == null)
            return null;

        String name;
        boolean deferred;
        if (_unq != null) {
            name = _unq.getName();
            deferred = _unq.isDeferred();
        } else {
            name = tmplate.getName();
            deferred = tmplate.isDeferred();
        }

        if (deferred && !dict.supportsDeferredConstraints) {
            Log log = repos.getLog();
            if (log.isWarnEnabled())
                log.warn(_loc.get(prefix + "-create-defer-unique",
                    context, dict.platform));
            deferred = false;
        }

        Unique unq = table.addUnique(name);
        unq.setDeferred(deferred);
        unq.setColumns(cols);
        return unq;
    }

            given, def, inversable, adapt);
        _join = JOIN_FORWARD;

        // establish local table using any join between two columns; if we only
        // find constant joins, then keep default local table (directionless)
        Table local = table;
        Table foreign = rel.getTable();
        Table tmp;
        boolean constant = false;
        boolean localSet = false;
        for (int i = 0; i < joins.length; i++) {
            if (joins[i][1] instanceof Column) {
                tmp = ((Column) joins[i][0]).getTable();

            throw new MetaDataException(_loc.get(prefix + "-no-fkcol-name",
                context));

        // check to see if the column isn't in the expected table; it might
        // be an inverse join or a join to a base class of the target type
        Table local = table;
        Table foreign = rel.getTable();
        boolean fullName = false;
        boolean inverse = false;
        if (name != null) {
            int dotIdx = name.lastIndexOf('.');
            if (dotIdx != -1) {
                // allow use of '.' without prefix to mean "use expected
                // foreign table"
                if (dotIdx == 0)
                    local = foreign;
                else
                    local = findTable(context, name.substring(0, dotIdx),
                        local, foreign, null);
                fullName = true;
                name = name.substring(dotIdx + 1);

                // if inverse join, then swap local and foreign tables
                if (local != table) {
                    foreign = table;
                    inverse = true;
                }
            }
        }
        boolean forceInverse = !fullName && _join == JOIN_INVERSE;
        if (forceInverse) {
            local = foreign;
            foreign = table;
            inverse = true;
        }

        // determine target
        String targetName = given.getTarget();
        Object target = null;
        Table ttable = null;
        boolean constant = false;
        boolean fullTarget = false;
        if (targetName == null && given.getTargetField() != null) {
            ClassMapping tcls = (inverse) ? cls : rel;
            String fieldName = given.getTargetField();

            // get first table alias before updating path; if there is a from
            // select then we shouldn't actually create a join object, since
            // the joins will all be done in the from select
            boolean createJoin = _sel._from == null;
            Table table1 = null;
            int alias1 = -1;
            if (createJoin) {
                table1 = (inverse) ? fk.getPrimaryKeyTable() : fk.getTable();
                alias1 = _sel.getTableIndex(table1, this, true);
            }

            // update the path with the relation name before getting pk alias
            this.append(name);
            this.append(var);
            if (toMany) {
                _sel._flags |= IMPLICIT_DISTINCT;
                _sel._flags |= TO_MANY;
            }
            _outer = outer;

            if (createJoin) {
                Table table2 = (inverse) ? fk.getTable()
                    : fk.getPrimaryKeyTable();
                int alias2 = _sel.getTableIndex(table2, this, true);
                Join j = new Join(table1, alias1, table2, alias2, fk, inverse);
                j.setType((outer) ? Join.TYPE_OUTER : Join.TYPE_INNER);

        // differentiate between secondary table joins and relations built
        // around an inverse key: check to see if we're mapped as a secondary
        // table join but we're in the table of the related type, and if so
        // switch our join mapping info to our value mapping info
        String tableName = field.getMappingInfo().getTableName();
        Table table = field.getTypeMapping().getTable();
        ValueMappingInfo vinfo = field.getValueInfo();
        if (tableName != null && table != null
            && (tableName.equalsIgnoreCase(table.getName())
            || tableName.equalsIgnoreCase(table.getFullName()))) {
            vinfo.setJoinDirection(MappingInfo.JOIN_INVERSE);
            vinfo.setColumns(field.getMappingInfo().getColumns());
            field.getMappingInfo().setTableName(null);
            field.getMappingInfo().setColumns(null);
        }

                sql.append(where);
            }
            return sql;
        }

        Table table = mapping.getTable();
        String tableName = getFullName(table, false);

        // only use a subselect if the where is not empty; otherwise
        // an unqualified delete or update will work
        if (sel.getWhere() == null || sel.getWhere().isEmpty()) {
            sql.append(tableName);
            appendUpdates(sel, store, sql, params, updateParams, false);
            return sql;
        }

        // we need to use a subselect if we are to bulk delete where
        // the select includes multiple tables; if the database
        // doesn't support it, then we need to signal this by returning null
        if (!supportsSubselect || !supportsCorrelatedSubselect)
            return null;

        Column[] pks = mapping.getPrimaryKeyColumns();
        sel.clearSelects();
        sel.setDistinct(true);

        // if we have only a single PK, we can use a non-correlated
        // subquery (using an IN statement), which is much faster than
        // a correlated subquery (since a correlated subquery needs
        // to be executed once for each row in the table)
        if (pks.length == 1) {
            sel.select(pks[0]);
            sql.append(tableName);
            appendUpdates(sel, store, sql, params, updateParams, false);
            sql.append(" WHERE ").
                append(pks[0]).append(" IN (").
                append(sel.toSelect(false, null)).append(")");
        } else {
            sel.clearSelects();
            sel.setDistinct(false);

            // since the select is using a correlated subquery, we
            // only need to select a bogus virtual column
            sel.select("1", null);

            // add in the joins to the table
            Column[] cols = table.getPrimaryKey().getColumns();
            SQLBuffer buf = new SQLBuffer(this);
            buf.append("(");
            for (int i = 0; i < cols.length; i++) {
                if (i > 0)
                    buf.append(" AND ");

    /**
     * Create a new table from the information in the schema metadata.
     */
    protected Table newTable(ResultSet tableMeta)
        throws SQLException {
        Table t = new Table();
        t.setName(tableMeta.getString("TABLE_NAME"));
        return t;
    }
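A factory like newTable is typically driven by JDBC database metadata. Here is a minimal sketch of that loop using plain java.sql calls; it is not the OpenJPA SchemaGenerator itself, and the catalog/schema filters are left as permissive placeholders:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.openjpa.jdbc.schema.Table;

    public class ReadTablesSketch {

        // Build a Table object for every user table visible through JDBC metadata.
        public static List<Table> readTables(Connection conn) throws SQLException {
            List<Table> tables = new ArrayList<Table>();
            DatabaseMetaData meta = conn.getMetaData();
            ResultSet rs = meta.getTables(null, null, "%", new String[]{ "TABLE" });
            try {
                while (rs.next()) {
                    Table t = new Table();
                    t.setName(rs.getString("TABLE_NAME"));
                    // columns, primary keys, and indexes would be filled in from
                    // further metadata calls (getColumns, getPrimaryKeys, ...)
                    tables.add(t);
                }
            } finally {
                rs.close();
            }
            return tables;
        }
    }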
