Table of Contents
Delete records of a certain collection
background
Summarize the notes of learning MongoDB in the early summer. This is a NoSQL database that can store large amounts of data, supports distributed, and the basic storage format is json key-value pairs.
installation
[root@localhost szc]# vim /etc/yum.repos.d/mongodb-org-3.4.repo
The content is as follows (note the required section header on the first line):
[mongodb-org-3.4]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/3.4/x86_64/
gpgcheck=0
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.4.asc
2. Install it. If the download is slow, retry the command a few times until it succeeds.
[root@localhost szc]# yum -y install mongodb-org
3. After completion, check the location
[root@localhost szc]# whereis mongod
mongod: /usr/bin/mongod /etc/mongod.conf /usr/share/man/man1/mongod.1
4. Modify the /etc/mongod.conf configuration file and comment out the bindIp setting to allow remote access
[root@localhost szc]# vim /etc/mongod.conf
[root@localhost szc]# cat /etc/mongod.conf
# mongod.conf
# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/
# where to write logging data.
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/mongod.log # 日志文件路径
# Where and how to store data.
storage:
dbPath: /var/lib/mongo # 数据文件目录
journal:
enabled: true
# engine:
# mmapv1:
# wiredTiger:
# how the process runs
processManagement:
fork: true # fork and run in background
pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile
# network interfaces
net:
port: 27017
#bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces.
5. Start the mongodb server and set it to boot
[root@localhost szc]# systemctl start mongod.service
[root@localhost szc]# systemctl enable mongod.service
6. Close the mongodb server
systemctl stop mongod.service
7. Start the client
[root@localhost szc]# mongo
Client command
Database command
> show dbs
admin 0.000GB
config 0.000GB
local 0.000GB
test 0.000GB
Use a database (no need to create)
> use test
switched to db test
A newly created database only appears in show dbs after data has been inserted into it (here we assume a new test2 database has been created)
> db.createCollection("user")
{ "ok" : 1 }
> show dbs
admin 0.000GB
local 0.000GB
test2 0.000GB
Delete the current database
> db.dropDatabase()
{ "ok" : 1 }
Set command
> show collections
student
user
Create collection
> db.createCollection("article_url")
{ "ok" : 1 }
Optional parameter, capped represents the fixed collection capacity, size specifies the capacity (in bytes)
> db.createCollection("user", {capped:true, size:10})
{ "ok" : 1 }
Delete collection
> db.user.drop()
true
Data type and data operation
type of data
String: utf-8 string
Integer: 32-bit or 64-bit integer
> new Date("2020-06-15")
ISODate("2020-06-15T00:00:00Z")
Called without arguments, it returns the current Greenwich (UTC) time
> new Date()
ISODate("2020-06-15T03:35:32.016Z")
Data manipulation
> db.test2.insert({"name":"szc", "age":23})
WriteResult({ "nInserted" : 1 })
> db.test2.find()
{ "_id" : ObjectId("5ee6ed5e757d5ca426d608b4"), "name" : "szc", "age" : 23 }
Insert another one, you can see that ObjectId adds 1
> db.test2.insert({"name":"jason", "age":22})
WriteResult({ "nInserted" : 1 })
> db.test2.find()
{ "_id" : ObjectId("5ee6ed5e757d5ca426d608b4"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 22 }
When inserting data with insert(), the _id must be unique; save(), however, will overwrite the existing document if the _id already exists
> db.test2.insert({"_id":1, "name":"a", "age": 11})
WriteResult({ "nInserted" : 1 })
> db.test2.find()
{ "_id" : ObjectId("5ee6ed5e757d5ca426d608b4"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 22 }
{ "_id" : 1, "name" : "a", "age" : 11 }
> db.test2.save({"_id":1, "name":"a", "age": 12})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test2.find()
{ "_id" : ObjectId("5ee6ed5e757d5ca426d608b4"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 22 }
{ "_id" : 1, "name" : "a", "age" : 12 }
2. Update data:
> db.test2.update({"name":"a"}, {"name":"b"})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test2.find()
{ "_id" : ObjectId("5ee6ed5e757d5ca426d608b4"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 22 }
{ "_id" : 1, "name" : "b" }
It can be seen that {"_id": 1, "name": "a", "age": 12} This piece of data is overwritten to {"_id": 1, "name": "b" }, the age field is gone
> db.test2.update({"name":"jason"}, {$set:{"age":21}})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test2.find()
{ "_id" : ObjectId("5ee6ed5e757d5ca426d608b4"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : 1, "name" : "b" }
By default only one matching document is updated; to update all matching documents, add multi:true
> db.test2.update({"name":"szc"}, {$set:{"name":"songzeceng"}}, {multi:true})
WriteResult({ "nMatched" : 2, "nUpserted" : 0, "nModified" : 2 })
> db.test2.find()
{ "_id" : ObjectId("5ee6ed5e757d5ca426d608b4"), "name" : "songzeceng", "age" : 23 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : 1, "name" : "b" }
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
Note that multi:true only works when the update uses $ operators (such as $set)
> db.test2.remove({"name":"songzeceng"}, {justOne:true})
WriteResult({ "nRemoved" : 1 })
> db.test2.find()
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : 1, "name" : "b" }
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
Without justOne, all matching data will be deleted
> db.test2.findOne({"age":17})
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
4.2. Operator, query all records with age field ≤ 30
> db.test2.find({"age":{$lte:30}})
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
lte:≤,lt:<,gt:>,gte:≥,ne:≠
> db.test2.find({"age":{$in:[17, 21, 25]}})
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
4.4, logical operations
1) Logical AND: Just add a field directly
> db.test2.find({"age":{$in:[17, 21, 25]}, "name":"jason"})
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
2), logical OR: $or
> db.test2.find({$or:[{"age":{$in:[17, 21, 25]}}, {"name":"szc"}]})
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
4.5, regular expressions
Query data starting with s
> db.test2.find({"name":/^s/})
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc", "age" : 23 }
Query data ending in ng
> db.test2.find({"name":/ng$/})
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
You can also change // to $regex
> db.test2.find({"name":{$regex:"ng$"}})
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
4.6, query the first two
> db.test2.find().limit(2)
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : 1, "name" : "b" }
4.7, skip the first two
> db.test2.find().skip(2)
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
The two can be combined to implement paging; in a find() query the order of skip and limit does not matter — skip is always applied first, then limit
> db.test2.find().limit(2).skip(1)
{ "_id" : 1, "name" : "b" }
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
> db.test2.find().skip(1).limit(2)
{ "_id" : 1, "name" : "b" }
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
4.8, custom query, use $where to define functions
> db.test2.find({$where:function(){return this.age <= 25}})
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
Filter the fields to be returned, 1 means return, 0 and not write means no return
> db.test2.find({$where:function(){return this.age <= 25}}, {name:1, _id:0})
{ "name" : "jason" }
{ "name" : "songzeceng" }
{ "name" : "szc" }
{ "name" : "bob" }
However, _id is returned even when it is not listed; it must be explicitly set to 0 to suppress it
> db.test2.find({$where:function(){return this.age <= 25}}, {name:1})
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason" }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng" }
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc" }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob" }
Output all documents and map fields
> db.test2.find({}, {_id:0, name:1})
{ "name" : "jason" }
{ "name" : "b" }
{ "name" : "songzeceng" }
{ "name" : "songzeceng" }
{ "name" : "szc" }
{ "name" : "bob" }
5. Sorting, using the sort function, 1 means ascending order, -1 means descending order
> db.test2.find().sort({age:1})
{ "_id" : 1, "name" : "b" }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
When the sort field is non-numeric, sort in alphabetical order
> db.test2.find().sort({name:-1})
{ "_id" : ObjectId("5ee6f279757d5ca426d608b8"), "name" : "szc", "age" : 23 }
{ "_id" : ObjectId("5ee6f05d757d5ca426d608b6"), "name" : "songzeceng", "age" : 32 }
{ "_id" : ObjectId("5ee6f062757d5ca426d608b7"), "name" : "songzeceng", "age" : 24 }
{ "_id" : ObjectId("5ee6edac757d5ca426d608b5"), "name" : "jason", "age" : 21 }
{ "_id" : ObjectId("5ee6f285757d5ca426d608b9"), "name" : "bob", "age" : 17 }
{ "_id" : 1, "name" : "b" }
6. Statistics, use the count() function
> db.test2.find({age:{$gte:18}}).count()
4
You can also just use the count() function and write the conditions in the count() function
> db.test2.count({age:{$lte:25}})
4
7. View all unique field values
> db.test2.distinct("name")
[ "jason", "b", "songzeceng", "szc", "bob" ]
You can also add conditions
> db.test2.distinct("name", {"age":{$lt:20}})
[ "bob" ]
Delete records of a certain collection
> db.article_url.remove({});
WriteResult({ "nRemoved" : 30 })
database backup
Give the server address, database name and export path
[root@localhost szc]# mongodump -h 192.168.57.141 -d test2 -o /home/szc/monogo
2020-06-15T13:00:15.083+0800 writing test2.test2 to
2020-06-15T13:00:15.084+0800 writing test2.t0 to
2020-06-15T13:00:15.085+0800 done dumping test2.test2 (6 documents)
2020-06-15T13:00:15.085+0800 done dumping test2.t0 (0 documents)
[root@localhost szc]# ll /home/szc/monogo/
total 0
drwxr-xr-x. 2 root root 90 Jun 15 13:00 test2
[root@localhost szc]# ll /home/szc/monogo/test2
total 12
-rw-r--r--. 1 root root 0 Jun 15 13:00 t0.bson
-rw-r--r--. 1 root root 80 Jun 15 13:00 t0.metadata.json
-rw-r--r--. 1 root root 291 Jun 15 13:00 test2.bson
-rw-r--r--. 1 root root 83 Jun 15 13:00 test2.metadata.json
[root@localhost szc]#
Database recovery
Give the server address, imported database name and import path
[root@localhost szc]# mongorestore -h 192.168.57.141 -d restore2 --dir /home/szc/monogo/test2
Aggregation
Group $group
> db.test2.aggregate({$group: {_id:"$name"}})
{ "_id" : "bob" }
{ "_id" : "jason" }
{ "_id" : "szc" }
{ "_id" : "b" }
{ "_id" : "songzeceng" }
Group and count, using the $sum operator; $sum: 1 adds 1 for every document in the group, i.e. it produces a per-group count
> db.test2.aggregate({$group: {_id:"$name", count:{$sum: 1}}})
{ "_id" : "bob", "count" : 1 }
{ "_id" : "jason", "count" : 1 }
{ "_id" : "szc", "count" : 1 }
{ "_id" : "b", "count" : 1 }
{ "_id" : "songzeceng", "count" : 2 }
Average age
> db.test2.aggregate({$group: {_id:"$name", count:{$sum: 1}, avg: {$avg: "$age"}}})
{ "_id" : "bob", "count" : 1, "avg" : 17 }
{ "_id" : "jason", "count" : 1, "avg" : 21 }
{ "_id" : "szc", "count" : 1, "avg" : 23 }
{ "_id" : "b", "count" : 1, "avg" : null }
{ "_id" : "songzeceng", "count" : 2, "avg" : 28 }
Grouping by null aggregates all documents into a single group
> db.test2.aggregate({$group: {_id:null, sum:{$sum: 1}, mean_age:{$avg:"$age"}}})
{ "_id" : null, "sum" : 6, "mean_age" : 23.4 }
Group by multiple fields
> db.test2.aggregate({$group:{_id:{name:"$name",age:"$age"}, count:{$sum:1}, avg:{$avg:"$age"}}})
{ "_id" : { "name" : "bob", "age" : 17 }, "count" : 1, "avg" : 17 }
{ "_id" : { "name" : "szc", "age" : 23 }, "count" : 1, "avg" : 23 }
{ "_id" : { "name" : "b" }, "count" : 1, "avg" : null }
{ "_id" : { "name" : "songzeceng", "age" : 24 }, "count" : 1, "avg" : 24 }
{ "_id" : { "name" : "jason", "age" : 21 }, "count" : 1, "avg" : 21 }
{ "_id" : { "name" : "songzeceng", "age" : 32 }, "count" : 1, "avg" : 32 }
When using multiple grouping pipelines, you can take the value in the field in the previous pipeline as the key
> db.test2.aggregate({$group:{_id:{name:"$name",age:"$age"}, count:{$sum:1}, avg:{$avg:"$age"}}}, {$group:{_id:"$_id.name", count:{$sum:1}}})
{ "_id" : "jason", "count" : 1 }
{ "_id" : "bob", "count" : 1 }
{ "_id" : "szc", "count" : 1 }
{ "_id" : "b", "count" : 1 }
{ "_id" : "songzeceng", "count" : 2 }
Projection $project
You can change the output fields
> db.test2.aggregate({$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$project:{name:"$_id", count:"$count", avg:"$avg"}})
{ "_id" : "bob", "name" : "bob", "count" : 1, "avg" : 17 }
{ "_id" : "jason", "name" : "jason", "count" : 1, "avg" : 21 }
{ "_id" : "szc", "name" : "szc", "count" : 1, "avg" : 23 }
{ "_id" : "b", "name" : "b", "count" : 1, "avg" : null }
{ "_id" : "songzeceng", "name" : "songzeceng", "count" : 2, "avg" : 28 }
You can also control the output of the field
> db.test2.aggregate({$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$project:{name:"$_id", count:1, avg:1, _id:0}})
{ "count" : 1, "avg" : 17, "name" : "bob" }
{ "count" : 1, "avg" : 21, "name" : "jason" }
{ "count" : 1, "avg" : 23, "name" : "szc" }
{ "count" : 1, "avg" : null, "name" : "b" }
{ "count" : 2, "avg" : 28, "name" : "songzeceng" }
Filter $match
For example, select records with age >=20
> db.test2.aggregate({$match:{age:{$gte:20}}}, {$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$project:{name:"$_id", count:1, avg:1, _id:0}})
{ "count" : 2, "avg" : 28, "name" : "songzeceng" }
{ "count" : 1, "avg" : 23, "name" : "szc" }
{ "count" : 1, "avg" : 21, "name" : "jason" }
You can also use logical operators such as $or in $match
> db.test2.aggregate({$match:{$or:[{age:{$gte:20}}, {name:{$regex:"^b"}}]}}, {$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$project:{name:"$_id", count:1, avg:1, _id:0}})
{ "count" : 1, "avg" : 17, "name" : "bob" }
{ "count" : 1, "avg" : 21, "name" : "jason" }
{ "count" : 1, "avg" : 23, "name" : "szc" }
{ "count" : 1, "avg" : null, "name" : "b" }
{ "count" : 2, "avg" : 28, "name" : "songzeceng" }
Sort $sort, -1 means descending order, 1 means ascending order
> db.test2.aggregate({$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$sort:{avg: -1}})
{ "_id" : "songzeceng", "count" : 2, "avg" : 28 }
{ "_id" : "szc", "count" : 1, "avg" : 23 }
{ "_id" : "jason", "count" : 1, "avg" : 21 }
{ "_id" : "bob", "count" : 1, "avg" : 17 }
{ "_id" : "b", "count" : 1, "avg" : null }
Limiting results with $limit and skipping with $skip works much like limit() and skip() in a find() query
> db.test2.aggregate({$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$sort:{avg: -1}}, {$limit:2})
{ "_id" : "songzeceng", "count" : 2, "avg" : 28 }
{ "_id" : "szc", "count" : 1, "avg" : 23 }
> db.test2.aggregate({$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$sort:{avg: -1}}, {$skip:2})
{ "_id" : "jason", "count" : 1, "avg" : 21 }
{ "_id" : "bob", "count" : 1, "avg" : 17 }
{ "_id" : "b", "count" : 1, "avg" : null }
The two can still be combined, but in the aggregation pipeline — unlike in a find() query — the order of $skip and $limit matters
> db.test2.aggregate({$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$sort:{avg: -1}}, {$skip:2}, {$limit:1})
{ "_id" : "jason", "count" : 1, "avg" : 21 }
> db.test2.aggregate({$group:{_id:"$name", count:{$sum:1}, avg:{$avg:"$age"}}}, {$sort:{avg: -1}}, {$limit:1}, {$skip:2})
>
Expand $unwind
> db.test2.insert({"name":"songzeceng", "langs":["Java", "C", "C++", "Python", "Scala"]})
WriteResult({ "nInserted" : 1 })
> db.test2.aggregate({$match:{name:"songzeceng"}}, {$unwind:"$langs"})
{ "_id" : ObjectId("5ee73ef68ea0bda5c6c60db1"), "name" : "songzeceng", "langs" : "Java" }
{ "_id" : ObjectId("5ee73ef68ea0bda5c6c60db1"), "name" : "songzeceng", "langs" : "C" }
{ "_id" : ObjectId("5ee73ef68ea0bda5c6c60db1"), "name" : "songzeceng", "langs" : "C++" }
{ "_id" : ObjectId("5ee73ef68ea0bda5c6c60db1"), "name" : "songzeceng", "langs" : "Python" }
{ "_id" : ObjectId("5ee73ef68ea0bda5c6c60db1"), "name" : "songzeceng", "langs" : "Scala" }
Statistics after expansion
> db.test2.aggregate({$match:{name:"songzeceng"}}, {$unwind:"$langs"}, {$group:{_id:null, total:{$sum:1}}}, {$project:{_id:0, total:1}})
{ "total" : 5 }
$unwind will filter out the records with missing expanded fields by default, you can specify preserveNullAndEmptyArrays:true to cancel the filtering
> db.test2.aggregate({$unwind:{path:"$langs", preserveNullAndEmptyArrays:true}}, {$project:{_id:0, name:1, langs:1}})
{ "name" : "jason" }
{ "name" : "b" }
{ "name" : "songzeceng" }
{ "name" : "songzeceng" }
{ "name" : "szc" }
{ "name" : "bob" }
{ "name" : "songzeceng", "langs" : "Java" }
{ "name" : "songzeceng", "langs" : "C" }
{ "name" : "songzeceng", "langs" : "C++" }
{ "name" : "songzeceng", "langs" : "Python" }
{ "name" : "songzeceng", "langs" : "Scala" }
Indexes
1. First insert 100,000 test records
> for(i=0;i<100000;i++) {db.test3.insert({name:'test'+i, age:i})}
WriteResult({ "nInserted" : 1 })
Then query a certain data and display the execution time. The executionTimeMillis in the result represents the execution time in milliseconds
> db.test3.find({'name':'test2341'}).explain("executionStats")
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "test2.test3",
"indexFilterSet" : false,
"parsedQuery" : {
"name" : {
"$eq" : "test2341"
}
},
"winningPlan" : {
"stage" : "COLLSCAN",
"filter" : {
"name" : {
"$eq" : "test2341"
}
},
"direction" : "forward"
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1,
"executionTimeMillis" : 26,
"totalKeysExamined" : 0,
"totalDocsExamined" : 100000,
"executionStages" : {
"stage" : "COLLSCAN",
"filter" : {
"name" : {
"$eq" : "test2341"
}
},
"nReturned" : 1,
"executionTimeMillisEstimate" : 10,
"works" : 100002,
"advanced" : 1,
"needTime" : 100000,
"needYield" : 0,
"saveState" : 781,
"restoreState" : 781,
"isEOF" : 1,
"invalidates" : 0,
"direction" : "forward",
"docsExamined" : 100000
}
},
"serverInfo" : {
"host" : "localhost.localdomain",
"port" : 27017,
"version" : "3.4.24",
"gitVersion" : "865b4f6a96d0f5425e39a18337105f33e8db504d"
},
"ok" : 1
}
Create an index, 1 means ascending order by name (here there is little difference in ascending and descending order)
> db.test3.ensureIndex({name:1})
{
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1
}
Look at the execution time
> db.test3.find({'name':'test2341'}).explain("executionStats")
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "test2.test3",
"indexFilterSet" : false,
"parsedQuery" : {
"name" : {
"$eq" : "test2341"
}
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"name" : 1
},
"indexName" : "name_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"name" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"name" : [
"[\"test2341\", \"test2341\"]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1,
"executionTimeMillis" : 0,
"totalKeysExamined" : 1,
"totalDocsExamined" : 1,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 1,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"name" : 1
},
"indexName" : "name_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"name" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"name" : [
"[\"test2341\", \"test2341\"]"
]
},
"keysExamined" : 1,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
"serverInfo" : {
"host" : "localhost.localdomain",
"port" : 27017,
"version" : "3.4.24",
"gitVersion" : "865b4f6a96d0f5425e39a18337105f33e8db504d"
},
"ok" : 1
}
From 26 milliseconds to 0 milliseconds, the effect is still very significant
> db.test3.getIndexes()
[
{
"v" : 2,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "test2.test3"
},
{
"v" : 2,
"key" : {
"name" : 1
},
"name" : "name_1",
"ns" : "test2.test3"
}
]
Delete index
> db.test3.dropIndex({name:1})
{ "nIndexesWas" : 2, "ok" : 1 }
Create a unique index
After creation, duplicate values are rejected on insert: new data will not overwrite the old data; instead an error is reported directly
> db.test3.ensureIndex({"name":1}, {"unique":true})
{
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1
}
Create a composite index
> db.test3.ensureIndex({"name":1, "age":-1})
{
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1
}
> db.test3.getIndexes()
[
{
"v" : 2,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "test2.test3"
},
{
"v" : 2,
"key" : {
"name" : 1,
"age" : -1
},
"name" : "name_1_age_-1",
"ns" : "test2.test3"
}
]
Conclusion
The above is the study notes of MongoDB, thank you for reading