MONGO
permitted SchemaTypes (Mongoose)
- String
- Number
- Date
- Buffer
- Boolean
- Mixed
- ObjectId
- Array
- Decimal128
- Map
Valid schema options for new Schema({...}, options); example after the list
- autoIndex // default: true
- autoCreate
- bufferCommands
- bufferTimeoutMS
- capped
- collection
- discriminatorKey
- id
- _id
- minimize
- read
- writeConcern
- shardKey
- strict
- strictQuery
- toJSON
- toObject
- typeKey
- useNestedStrict
- validateBeforeSave
- versionKey
- optimisticConcurrency
- collation
- timeseries
- selectPopulatedPaths
- skipVersioning
- timestamps
- storeSubdocValidationError
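Example: a minimal sketch of a schema using a few of the options above (assumes Mongoose; the field names and the 'users' collection are illustrative, not fixed by anything above):
const mongoose = require('mongoose');
const userSchema = new mongoose.Schema(
  {
    username: { type: String, required: true },
    tags: [String],
    meta: { type: Map, of: Number },
  },
  {
    timestamps: true,      // adds createdAt / updatedAt
    strict: true,          // drop paths not declared in the schema
    versionKey: '__v',     // default version key name
    collection: 'users',   // explicit collection name
  }
);
const User = mongoose.model('User', userSchema);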
MongoDB connect
// MongoDB+srv connection string for cloud-hosted databases such as MongoDB Atlas
const { MongoClient } = require('mongodb');
const uri = "mongodb+srv://username:password@cluster0.example.com/mydb";
// useNewUrlParser / useUnifiedTopology are accepted but are no-ops in Node driver 4.x+
const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true });
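A sketch of actually opening and verifying the connection ('mydb' is just the database name from the URI above):
async function run() {
  try {
    await client.connect();
    await client.db('mydb').command({ ping: 1 });   // cheap round-trip to confirm connectivity
    console.log('Connected to MongoDB');
  } finally {
    await client.close();
  }
}
run().catch(console.error);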
// Advanced Connection String Options
mongodb://username:password@host1:27017,host2:27018,host3:27019/mydb?replicaSet=myReplicaSet&authSource=admin&ssl=true&connectTimeoutMS=300000&socketTimeoutMS=30000&readPreference=primaryPreferred
// enable TLS on a MongoDB server
mongod --tlsMode requireTLS --tlsCertificateKeyFile /path/to/your/cert.pem
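To connect a shell client to that TLS-enabled server (a sketch for the 4.2+ mongo shell; host and CA file path are placeholders):
mongo --tls --tlsCAFile /path/to/ca.pem --host your.mongo.host --port 27017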
// check db size
use yourDatabaseName
var stats = db.stats()
print("Database Size: " + (stats.dataSize / (1024 * 1024 * 1024)).toFixed(2) + " GB"); // Database Size: 1.75 GB
print("Storage Size: " + (stats.storageSize / (1024 * 1024 * 1024)).toFixed(2) + " GB"); // Storage Size: 2.00 GB
// db.collection.stats(), example output (truncated; sizes in bytes)
{
  size: 12345646,             // uncompressed data size
  totalIndexSize: 1345641561,
}
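A shell loop to print per-collection sizes in the current database (legacy mongo shell; stats() values are in bytes):
db.getCollectionNames().forEach(function (name) {
  var s = db.getCollection(name).stats();
  print(name + ": " + (s.size / (1024 * 1024)).toFixed(2) + " MB data, "
      + (s.totalIndexSize / (1024 * 1024)).toFixed(2) + " MB indexes");
});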
// user access
$ mongo
> use admin
db.createUser({
user: "myAdminUser",
pwd: "myAdminPassword",
roles: [{ role: "userAdminAnyDatabase", db: "admin" }, "readWriteAnyDatabase"]
});
db.createUser({
user: "myReadOnlyUser",
pwd: "password",
roles: [
{ role: "read", db: "myDatabase" },
]
});
db.grantRolesToUser('myReadOnlyUser', [
{ role: 'readWrite', db: 'myDatabase' }
])
db.createRole({
role: 'readAndBackup',
privileges: [
{ resource: { db: 'myDatabase', collection: '' }, actions: [ 'find', 'backup' ] }
],
roles: []
})
db.grantRolesToUser('myReadOnlyUser', ['readAndBackup'])
db.revokeRolesFromUser('myReadOnlyUser', ['readWrite'])
// view user roles
> use myDatabase
> db.getUsers()
> db.getRoles()
// change user password
> db.changeUserPassword("root", "newpassword")
> db.updateUser("root", {pwd: "newRootPassword"});
// then connect
$ mongo -u myAdminUser -p myAdminPassword --authenticationDatabase admin
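Note: the users above are only enforced once access control is enabled; in the YAML config that is the security.authorization setting:
# /etc/mongod.conf (restart mongod afterwards)
security:
  authorization: enabled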
// Constructing ObjectId with Specific Time
const date = new Date('2023-01-01T00:00:00Z');
const objectIdFromDate = ObjectId(Math.floor(date.getTime() / 1000).toString(16) + '0000000000000000');
const timestamp = document._id.getTimestamp();
// returns JavaScript `Date` object
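A time-derived ObjectId is typically used for an _id range query, e.g. "documents created on or after 2023-01-01" (the 'events' collection is illustrative):
// uses objectIdFromDate from above; collection name is hypothetical
db.events.find({ _id: { $gte: objectIdFromDate } })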
// advanced date query
db.collection.aggregate([
{
$project: {
year: { $year: '$date' },
month: { $month: '$date' },
day: { $dayOfMonth: '$date' },
hour: { $hour: '$date' }
}
},
{
$match: {
hour: 10
}
}
])
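For a plain range filter (no aggregation needed), compare the date field against ISODate bounds:
// all documents whose `date` falls in January 2023
db.collection.find({
  date: {
    $gte: ISODate("2023-01-01T00:00:00Z"),
    $lt: ISODate("2023-02-01T00:00:00Z")
  }
})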
db.collection.findAndModify({
query: { <query> },
update: { <update> },
remove: <boolean>,
new: <boolean>,
upsert: <boolean>,
sort: { <fields> },
fields: { <fields> }
})
{
"_id": 1,
"title": "First Post",
"comments": [
{ "author": "Jane", "text": "Great post!" }
]
}
db.posts.findOneAndUpdate(
{ _id: 1, "comments.author": "Jane" },
{ $set: { "comments.$.author": "John" } },
{ returnNewDocument: true }
);
mongodb array operations (examples below)
$push: Adds an element to an array.
$addToSet: Adds an element to an array only if it does not already exist.
$pull: Removes matching elements from an array.
$pop: Removes the first or last element of an array.
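Examples against a hypothetical `tags` array field on the posts collection:
db.posts.updateOne({ _id: 1 }, { $push: { tags: "mongodb" } })       // append
db.posts.updateOne({ _id: 1 }, { $addToSet: { tags: "mongodb" } })   // append only if not present
db.posts.updateOne({ _id: 1 }, { $pull: { tags: "mongodb" } })       // remove matching values
db.posts.updateOne({ _id: 1 }, { $pop: { tags: 1 } })                // 1 = last element, -1 = first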
mongodb index operations
The ESR (Equality, Sort, Range) rule: in a compound index, put fields matched by equality first, then fields used for sorting, then fields queried by range (example after the index commands below).
Single Field: Indexing individual fields in a document.
Compound: Indexing multiple fields within a document.
Multikey: Indexing fields that contain array values.
Geospatial: Indexing geospatial data for location-based querying.
Text: Indexing text for search functionality.
// create single field index
db.users.createIndex({ username: 1 })
// Verifying the Index
db.users.getIndexes()
// To check the size of your indexes
db.collection.totalIndexSize()
// 4617080000 // 4.3 gigabytes
// create compound index
db.users.createIndex({ username: 1, email: 1 })
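ESR sketch: equality field, then sort field, then range field (the orders collection and its fields are illustrative):
// equality (status) -> sort (createdAt) -> range (amount)
db.orders.createIndex({ status: 1, createdAt: -1, amount: 1 })
db.orders.find({ status: "paid", amount: { $gte: 100 } }).sort({ createdAt: -1 })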
Executing MongoDB script files & general management
// Access shell
mongo
// check configuration of the MongoDB server
db.serverCmdLineOpts()
// Load and execute the script
load("/path/to/your/queries.js")
// Passing Command-Line Arguments
mongo --nodb --shell --eval "var name='MongoDB'; var age=10" /path/to/your/queries_with_args.js
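Variables set via --eval are visible inside the loaded script; a hypothetical queries_with_args.js could be as small as:
// queries_with_args.js (hypothetical): prints the vars injected via --eval
print("name=" + name + ", age=" + age);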
// Add a cron job that runs 'queries.js' every day at 3 a.m.
0 3 * * * mongo database_name /path/to/your/queries.js
// Stopping MongoDB on Unix-like systems
sudo service mongod stop
// Stopping MongoDB on Windows
net stop MongoDB
// Move the existing data to the new directory, ensuring permissions are adequate
sudo mv /data/db /new/directory/path
sudo chown -R mongodb:mongodb /new/directory/path
// restart with new dbpath
mongod --dbpath /new/directory/path
// For Windows, edit the MongoDB config file and adjust the dbpath value:
dbpath=C:\new\directory\path
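On installs that use the YAML config format, the equivalent setting is storage.dbPath:
# mongod.conf (YAML format)
storage:
  dbPath: /new/directory/path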
// mv db by script
#!/bin/bash
stop_mongodb() {
echo "Stopping MongoDB service..."
sudo service mongod stop
}
move_data_files() {
echo "Moving MongoDB data files..."
sudo mv /data/db /new/directory/path
sudo chown -R mongodb:mongodb /new/directory/path
}
update_db_path() {
echo "Updating dbPath in MongoDB config..."
sudo sed -i 's|/data/db|/new/directory/path|g' /etc/mongod.conf
}
start_mongodb() {
echo "Starting MongoDB service with new dbPath..."
sudo service mongod start
}
stop_mongodb
move_data_files
update_db_path
start_mongodb
echo "MongoDB data migration completed."
Sync to Elasticsearch (ES)
replica configuration
rs.status()
rs.reconfig({ _id: "rs0", members: [{ _id: 0, host: "192.168.1.9:8094" }, { _id: 1, host: "192.168.1.9:8095" }, { _id: 2, host: "192.168.1.9:8096" }]}, { force: true })
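If the set has not been initiated yet, rs.initiate() takes the same config shape (hosts reused from the reconfig example above):
// run once, on a single member
rs.initiate({
  _id: "rs0",
  members: [
    { _id: 0, host: "192.168.1.9:8094" },
    { _id: 1, host: "192.168.1.9:8095" },
    { _id: 2, host: "192.168.1.9:8096" }
  ]
})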
mongodump & mongorestore on a replica set
mongorestore --uri="mongodb://192.168.0.58:8054,192.168.0.58:8055,192.168.0.58:8056/dbname?replicaSet=rs0" --gzip dump/
mongodump --uri="mongodb://<db_user>:<db_passwd>@106.14.254.1:8054,106.14.254.1:8055,106.14.254.1:8056/dbname?replicaSet=rs0" --collection=invoices --out=dump
[db operations]
# drop database
mongo <dbname> --eval "db.dropDatabase()"
# in mongodb console
> use mydb;
> db.dropDatabase();
docker-compose
version: "3.6"
services:
  mongo1:
    image: "mongo:4.2"
    container_name: mongo1
    hostname: mongo1.example.com
    command: ["--replSet", "rs", "--bind_ip_all", "--wiredTigerCacheSizeGB", "1"]
    ports:
      - 8031:27017
    volumes:
      - /mnt/ssd/mongo-replica/mongod.conf:/etc/mongod.conf
      - /mnt/ssd/mongo-replica/node1:/data/db
    networks:
      my-net:
        ipv4_address: 172.16.8.6
  mongo2:
    image: "mongo:4.2"
    container_name: mongo2
    hostname: mongo2.example.com
    command: ["--replSet", "rs", "--bind_ip_all", "--wiredTigerCacheSizeGB", "1"]
    ports:
      - 8032:27017
    volumes:
      - /mnt/ssd/mongo-replica/mongod.conf:/etc/mongod.conf
      - /mnt/ssd/mongo-replica/node2:/data/db
    networks:
      my-net:
        ipv4_address: 172.16.8.7
  mongo3:
    image: "mongo:4.2"
    container_name: mongo3
    hostname: mongo3.example.com
    command: ["--replSet", "rs", "--bind_ip_all", "--wiredTigerCacheSizeGB", "1"]
    ports:
      - 8033:27017
    volumes:
      - /mnt/ssd/mongo-replica/mongod.conf:/etc/mongod.conf
      - /mnt/ssd/mongo-replica/node3:/data/db
    networks:
      my-net:
        ipv4_address: 172.16.8.8
  postgres:
    image: postgres:alpine
    container_name: postgres
    environment:
      - POSTGRES_PASSWORD=$POSTGRES_PASSWORD
      - POSTGRES_DB=$POSTGRES_DB
      - POSTGRES_USER=$POSTGRES_USER
    restart: always
    volumes:
      - /mnt/ssd/postgres:/var/lib/postgresql/data
    ports:
      - "15432:5432"
    networks:
      my-net:
        ipv4_address: 172.16.8.9
networks:
  my-net:
    name: "net_optimize"
    ipam:
      driver: default
      config:
        - subnet: "172.16.8.0/24"
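After the containers are up, the replica set still has to be initiated once; a sketch using the static IPs declared above (replica set name "rs" matches the --replSet flag in the commands):
# run once after `docker-compose up -d`
docker exec -it mongo1 mongo --eval '
  rs.initiate({
    _id: "rs",
    members: [
      { _id: 0, host: "172.16.8.6:27017" },
      { _id: 1, host: "172.16.8.7:27017" },
      { _id: 2, host: "172.16.8.8:27017" }
    ]
  })'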