https://www.schemacrawler.com/getting-started.html

https://killercoda.com/schemacrawler

## Get a List of Tables
schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --info-level=minimum \
  --command=list

## Get Details of Tables
schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --info-level=standard \
  --command=schema

## Limit Tables in Output
schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --tables=.*Authors \
  --no-info \
  --info-level=maximum \
  --command=details

## Search for Tables
schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --grep-columns=.*Data \
  --no-info \
  --info-level=standard \
  --command=schema


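# Same search, made case-insensitive with an inline (?i) regex flag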
schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --grep-columns='(?i).*data' \
  --no-info \
  --info-level=standard \
  --command=schema

## Run Other Commands
schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --grep-columns=.*Data \
  --no-info \
  --info-level=standard \
  --command=count

schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --grep-columns=.*Data \
  --no-info \
  --info-level=standard \
  --command=dump

## Find Related Tables
schemacrawler \
  --server=sqlite \
  --database=sc.db \
  --no-info \
  --grep-columns ".*Authors.*" \
  --parents=1 \
  --info-level=standard \
  --command=brief

## SchemaCrawler Interactive Shell
schemacrawler --shell
connect --server=sqlite --database=sc.db
load --info-level=minimum
execute --command=list
load --info-level=standard
execute --command=schema

# Limit Tables in Output
limit --tables=.*Authors
load --info-level=maximum
execute --command=details --no-info

# Remove Table Limits
sweep
connect --server=sqlite --database=sc.db
load --info-level=standard
grep --grep-columns=.*Data
execute --command=schema

# Run Other Commands
execute --command=count
execute --command=dump

# Find Related Tables
sweep
connect --server=sqlite --database=sc.db
grep --grep-columns .*Authors.*
filter --parents=1
load --info-level=standard
execute --command=brief --no-info

## Output Files
schemacrawler --shell
connect --server=sqlite --database=sc.db
load --info-level=standard
execute --command=schema --no-info --output-file=./share/schema.txt

execute --command=serialize --serialization-format=json --output-file=./share/schema.json

execute --command=serialize --serialization-format=yaml --output-file=./share/schema.yaml

execute --command=schema --output-file=./share/schema.html

execute --command=schema --output-file=./share/schema.png

execute --command=script --script=./share/tables.js --scripting-language=javascript --output-file=./share/tables.js.txt

execute --command=script --script=./share/tables.py --scripting-language=python --output-file=./share/tables.py.txt

execute --command=template --template=./share/tables.mustache --templating-language=mustache --output-file=./share/tables.mustache.txt

## tables.js
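// List each table with its child (referencing) tables;
// `catalog` is provided by SchemaCrawler's script command.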
var printChildren = function()
{
  var forEach = Array.prototype.forEach;

  print(catalog.getCrawlInfo());

  forEach.call(catalog.getTables(), function(table)
  {
    print('');
    print(table.getFullName());
    var children = table.getReferencingTables();
    forEach.call(children, function(childTable)
    {
      print("  [child] " + childTable.getFullName());
    });
  });
};

printChildren();

## tables.py
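# List each table with its child (referencing) tables; `catalog` is in scope.
# Note: SchemaCrawler runs Python scripts on Jython, so Python 2 print statements are expected.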
for table in catalog.tables:
  print ''
  print table.fullName
  for childTable in table.referencingTables:
    print "  [child] " + childTable.fullName

## tables.rb
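# List each table with its child (referencing) tables;
# `catalog` is provided by SchemaCrawler's script command (running on JRuby).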
require 'java'

def schemacrawler
  Java::Schemacrawler
end

puts catalog.crawlInfo

for table in catalog.tables
  puts ''
  puts table.fullName
  for childTable in table.referencingTables
    puts "  [child] " + childTable.fullName
  end
end

## tables.groovy
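// List each table with its child (referencing) tables;
// `catalog` is provided by SchemaCrawler's script command.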
println catalog.crawlInfo

for (table in catalog.tables)
{
  println ''
  println table.fullName
  for (childTable in table.getReferencingTables())
  {
    println "  [child] " + childTable.fullName
  }
}

## tables.ftl
${catalog.crawlInfo}

<#list catalog.schemas as schema>
<#list catalog.getTables(schema) as table>- ${table}
<#list table.columns as column> - ${column}
</#list></#list></#list>

## tables.mustache
{{#catalog.tables}}
- {{&fullName}}
{{#columns}}
 - {{&name}}
{{/columns}}
{{/catalog.tables}}

## tables.thymeleaf

<!DOCTYPE html SYSTEM "http://www.thymeleaf.org/dtd/xhtml1-strict-thymeleaf-4.dtd">

<html xmlns="http://www.w3.org/1999/xhtml"
      xmlns:th="http://www.thymeleaf.org">

  <head>
    <title>SchemaCrawler Schema</title>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
  </head>

  <body>

    <pre th:text="${catalog.crawlInfo}">SchemaCrawler Information</pre>

    <span th:each="schema: ${catalog.schemas}">
      <h1 th:text="${schema.fullName}">Schema</h1>
        <span th:each="table: ${catalog.getTables(schema)}">
          <h2 th:text="${table.fullName}">Table</h2>
          <ol th:each="column: ${table.columns}">
            <li th:text="${column.name}">Columns</li>
          </ol>
        </span>
    </span>

  </body>

</html>

## Docker Setup
ubuntu $ #!/bin/sh
ubuntu $
ubuntu $ # Setup a shared directory which will be mounted on the Docker container
ubuntu $ # and make the "users" group the owner
ubuntu $ adduser root users
Adding user `root' to group `users' ...
Adding user root to group users
Done.
ubuntu $ mkdir /root/share
mkdir: cannot create directory '/root/share': File exists
ubuntu $ chown :100 /root/share
ubuntu $ chmod 777 /root/share
ubuntu $ chmod g+s /root/share
ubuntu $
ubuntu $ # Start a webserver where output files will be created
ubuntu $ python3 -m http.server 80 --directory /root/share > ~/webserver.log 2>&1 &
[1] 2701
ubuntu $
ubuntu $ # Run Docker container
ubuntu $ docker run \
-v /root/share:/home/schcrwlr/share \
--name schemacrawler \
--rm -i -t \
schemacrawler/schemacrawler:v16.21.2 \
bash

## Windows Install
choco install schemacrawler

Chocolatey v1.1.0
2 validations performed. 1 success(es), 1 warning(s), and 0 error(s).

Validation Warnings:
A pending system reboot request has been detected, however, this is
being ignored due to the current Chocolatey configuration. If you
want to halt when this occurs, then either set the global feature
using:
choco feature enable -name=exitOnRebootDetected
or pass the option --exit-when-reboot-detected.
Installing the following packages:
schemacrawler
By installing, you accept licenses for the packages.
Progress: Downloading schemacrawler 16.21.2… 100%
schemacrawler v16.21.2 [Approved]
schemacrawler package files install completed. Performing other installation steps.
The package schemacrawler wants to run 'chocolateyinstall.ps1'.
Note: If you don't run this script, the installation will fail.
Note: To confirm automatically next time, use '-y' or consider:
choco feature enable -n allowGlobalConfirmation
Do you want to run the script?([Y]es/[A]ll - yes to all/[N]o/[P]rint): Y
Downloading schemacrawler
from 'https://github.com/schemacrawler/SchemaCrawler/releases/download/v16.21.2/schemacrawler-16.21.2-bin.zip'
Progress: 100% - Completed download of C:\Users\win10\AppData\Local\Temp\chocolatey\schemacrawler\16.21.2\schemacrawler-16.21.2-bin.zip (288.37 MB).
Download of schemacrawler-16.21.2-bin.zip (288.37 MB) completed.
Hashes match.
Extracting C:\Users\win10\AppData\Local\Temp\chocolatey\schemacrawler\16.21.2\schemacrawler-16.21.2-bin.zip to C:\ProgramData\chocolatey\lib\schemacrawler…
C:\ProgramData\chocolatey\lib\schemacrawler
Added C:\ProgramData\chocolatey\bin\schemacrawler.exe shim pointed to '..\lib\schemacrawler\schemacrawler-16.21.2-bin\bin\schemacrawler.cmd'.
Environment Vars (like PATH) have changed. Close/reopen your shell to
see the changes (or in powershell/cmd.exe just type `refreshenv`).
The install of schemacrawler was successful.
Software installed to 'C:\ProgramData\chocolatey\lib\schemacrawler'
Chocolatey installed 1/1 packages.
See the log for details (C:\ProgramData\chocolatey\logs\chocolatey.log).

## Working with a Database

wget https://github.com/ivanceras/sakila/raw/main/sqlite-sakila-db/sakila.db

# List all tables
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level minimum \
  --command list

# List film related tables
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level minimum \
  --command list \
  --grep-tables film.*

# Get details of the "film" table
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level maximum \
  --command details \
  --grep-tables film \
  --no-info

# Visualize the "film" table
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level maximum \
  --command details \
  --grep-tables film \
  --output-file share/film-table.pdf

# Explore tables related to the "film" table
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level standard \
  --command schema \
  --grep-tables film \
  --children 1 \
  --output-file share/film-table-children.pdf

# Find tables with a "film_id" column
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level standard \
  --command schema \
  --grep-columns '.*\.film_id' \
  --output-file share/film_id-tables.pdf

# Again, with weak associations:
# -- Weak associations are drawn with dotted lines, to columns with unique constraints.
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level standard \
  --command schema \
  --grep-columns '.*\.film_id' \
  --weak-associations \
  --output-file share/film-table-weak-associations.pdf

# Find Database Schema Design Issues
# -- Review the design issues that SchemaCrawler has found.
# -- If you go back and look at the details of the "CREATE" statement for the "film"
#    table, you will see the reason why SchemaCrawler reports "column where NULL may
#    be intended".
# -- For example, "original_language_id SMALLINT DEFAULT NULL" is better defined
#    simply as "original_language_id SMALLINT".
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --no-info \
  --info-level standard \
  --command lint \
  --grep-columns '.*\.film_id'

# Explore Data
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --no-info \
  --info-level minimum \
  --command count \
  --grep-tables film.*

# Using Grep
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level standard \
  --command dump \
  --grep-tables film \
  --output-file share/film-data.html

# SVG Diagram
schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --no-info \
  --info-level maximum \
  --command details \
  --grep-tables film \
  --output-format htmlx \
  --output-file share/film-table.html

## Compare Differences
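# Download a second version of the Sakila database to compare against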
wget -O sakila2.db https://github.com/bradleygrant/sakila-sqlite3/raw/main/sakila_main.db

schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level standard \
  --command schema \
  --no-info \
  --output-file share/sakila.txt

schemacrawler \
  --url "jdbc:sqlite:sakila2.db" \
  --info-level standard \
  --command schema \
  --no-info \
  --output-file share/sakila2.txt

diff -U 0 share/sakila.txt share/sakila2.txt

## Evolve Your Database Schema
SchemaCrawler can execute Python, Ruby or Groovy scripts against your database. We have provided a Python script called "dbml.py" that outputs your database schema in DBML (Database Markup Language) so that it can be edited online using dbdiagram.io.

To generate DBML for your database, run:

schemacrawler \
  --url "jdbc:sqlite:sakila.db" \
  --info-level standard \
  --command script \
  --table-types TABLE \
  --no-info \
  --script=./share/dbml.py \
  --scripting-language=python \
  --output-file share/sakila.dbml

Check that the output file was generated by running "ls -lt share". Refresh the "File Browser" and view the generated DBML file. Copy the contents of the file and paste them into dbdiagram.io, where you can continue to design and modify your schema.
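
The provided dbml.py script is not reproduced here, but a minimal sketch of the same idea follows. This is a hypothetical illustration, not the tutorial's actual script: it assumes, as with tables.py above, that SchemaCrawler binds "catalog" for scripts and runs Python on its Jython (Python 2) engine, and it emits only table and column names with simplified type names.

# Hypothetical sketch, not the tutorial's dbml.py:
# emit one DBML "Table" block per table, listing column names and types.
for table in catalog.tables:
  print 'Table ' + table.name + ' {'
  for column in table.columns:
    # name of the column's data type, for example INTEGER or VARCHAR
    print '  ' + column.name + ' ' + column.columnDataType.name
  print '}'
  print ''

A fuller script would also mark primary keys and emit "Ref:" lines for foreign keys, which is what makes the dbdiagram.io rendering useful.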
