Split "bigfile" into files of at most 1000 lines each with prefix "/lots/of/little/files/here"
split bigfile /lots/of/little/files/here
Split "complete.out" into files with at most "$lines_per_file" lines each
split --lines $lines_per_file complete.out
Split "data.tsv" into files of at most 100 MiB preserving lines and use a prefix of "data.tsv." and numeric suffixes
split -C 100m -d data.tsv data.tsv.
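(Aside, illustrative: with GNU split, -b cuts at exact byte counts and may break a line in half, while -C packs as many whole lines as fit under the limit. A minimal sketch, assuming a hypothetical input sample.txt:)
split -b 100m sample.txt bytes.   # exact-size pieces, may split mid-line
split -C 100m sample.txt lines.   # at most 100 MiB per piece, lines kept whole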
Split "data.tsv" into files of at most 5000000 lines each with prefix "_tmp"
split -l5000000 data.tsv '_tmp'
Split "database.sql" into files of at most 100000 lines each with prefix "database-"
split -l 100000 database.sql database-
Split "date.csv" into files with at most 100 lines each
split -l 100 date.csv
Split "file.tar.gz" into files of size 1024 MB with a prefix of "file.tar.gz.part-"
split -b 1024m "file.tar.gz" "file.tar.gz.part-"
Split "file.txt" excluding the first line into files of at most 4 lines each and with a prefix "split_"
tail -n +2 file.txt | split -l 4 - split_
Split "file.txt" excluding the first line into files with at most 20 lines each and a prefix "split_"
tail -n +2 file.txt | split -l 20 - split_
Split "file.txt" into files of at most 1 MiB in size with a numeric suffix, prefix "file", and additional suffix ".txt"
split -b 1M -d file.txt file --additional-suffix=.txt
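(For reference: GNU split starts numeric suffixes at 00 with width 2, so the command above yields file00.txt, file01.txt, and so on; a different start value can be given explicitly, e.g.:)
split -b 1M --numeric-suffixes=1 --additional-suffix=.txt file.txt file   # file01.txt, file02.txt, ...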
Split "file.txt" into files of at most 20 lines each with a prefix "new"
split -l 20 file.txt new
Split "foo.txt" into files with 1 line each and use a suffix length of 5
split --suffix-length=5 --lines=1 foo.txt
Split "hugefile.txt" into files with 100000 lines each starting with "part." and using numeric suffixes
split -a4 -d -l100000 hugefile.txt part.
Split "infile" into 2 files of about equal size
split -n2 infile
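(GNU split's -n accepts several chunk formats besides a plain count; an illustrative sketch:)
split -n 2 infile      # two byte-wise chunks of about equal size
split -n l/2 infile    # two chunks, never splitting a line
split -n 3/5 infile    # write only the third of five chunks to standard output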
Split "input.txt" into files of at most 10 bytes each with prefix "/tmp/split-file"
split -b 10 input.txt /tmp/split-file
Split "input.txt" into files of at most 10 bytes each with prefix "xxx/split-file"
split -b 10 input.txt xxx/split-file
Split "input.txt" into files with 1 line each and use a prefix "output." and a suffix length of 5
split --lines=1 --suffix-length=5 input.txt output.
Split "input_file" into files of at most 100 lines each with prefix "output_file"
split -l 100 input_file output_file
Split "list.txt" into files with at most 600 lines each
split -l 600 list.txt
Split "mybigfile.txt" into files of at most 200000 lines each
split -l 200000 mybigfile.txt
Split "randn20M.csv" into files of at most 5000000 lines each with prefix "_tmp"
split -l5000000 randn20M.csv '_tmp'
Split "system.log" into files of at most 10 MiB in size with a numeric suffix and prefix "system_split.log"
split -b 10M -d system.log system_split.log
Split "t.txt" into files with at most 30000000 lines each and use a prefix "t" and numeric suffixes of length 2
split --lines=30000000 --numeric-suffixes --suffix-length=2 t.txt t
Split "your_file" into files with at most 9 lines each
split -l9 your_file
split "$SOURCE_FILE" into pieces per 100 lines
split -l 100 "$SOURCE_FILE"
Split a file "file.tar.gz" into pieces named as "file.tar.gz.part-NNN" with size 1024 MB where NNN is a numeric suffix
split -b 1024m "file.tar.gz" "file.tar.gz.part-"
Split a file "file.tar.gz" into pieces with size 1024 MB
split -b 1024m file.tar.gz
split a file "list.txt" into pieces per 600 lines
split -l 600 list.txt
Split a file ADDRESSS_FILE into pieces per 20 lines named with prefix "temp_file_" and numeric suffixes
split -l20 -d ADDRESSS_FILE temp_file_
Split all files in the directory tree "/dev/shm/split/" into files of at most 1000 lines each and use the filename as the prefix
find /dev/shm/split/ -type f -exec split -l 1000 {} {} \;
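(With {} doubling as the prefix, each file's pieces appear beside it as nameaa, nameab, and so on; to put a separator before the suffix, the prefix can be built in a shell. An illustrative variation:)
find /dev/shm/split/ -type f -exec sh -c 'split -l 1000 "$1" "$1."' _ {} \;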
split compressed content of the directory /home into pieces per 4000 MB named as "/media/DRIVENAME/BACKUPNAME.tgz.NNN"
tar --one-file-system -czvf - /home | split -b 4000m - /media/DRIVENAME/BACKUPNAME.tgz.
split compressed content of the file www into pieces per 1073741824 bytes named as "www-backup.tar.NNN"
tar czf - www | split -b 1073741824 - www-backup.tar.
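(Restoring is the reverse pipe; a minimal sketch, assuming the pieces are in the current directory:)
cat www-backup.tar.* | tar xzf -   # concatenate the pieces and unpack the stream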
Split the combined contents of all ".txt" files, excluding the first 1000 lines, into files of at most 1000 lines each
cat *.txt | tail -n +1001 | split --lines=1000
split content of the file file.txt starting from the second line into pieces per 4 lines named as split_NNN
tail -n +2 file.txt | split -l 4 - split_
split content of the file inputfile excluding lines that start with "t:" into pieces per 200 lines
grep -v "^t:" inputfile | split -l 200
split file "${fspec} into pieces named as "xyzzyNNN" with numeric prefix from 1 to 6
split --number=l/6 ${fspec} xyzzy.
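(The l/ prefix makes --number balance by lines rather than bytes, so no line is cut across pieces; compare, illustratively:)
split --number=6 "${fspec}" xyzzy.     # six byte-equal chunks, lines may be cut
split --number=l/6 "${fspec}" xyzzy.   # six chunks built from whole lines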
split the file "/path/to/large/file" into pieces per 50000 lines named as /path/to/output/file/prefixNNN
split --lines=50000 /path/to/large/file /path/to/output/file/prefix
split the file "file" into pieces per 2 lines
split -n2 infile
split file "your_file" into pieces per 9 lines
split -l9 your_file
split file "$file into pieces named with 5 character suffix
split -a 5 $file
split file /etc/gconf/schemas/gnome-terminal.schemas into pieces per 1000000 lines
split -l 1000000 /etc/gconf/schemas/gnome-terminal.schemas
split file /usr/bin/firefox into pieces per 1000 lines
split -l 1000 /usr/bin/firefox
split file /usr/bin/gcc into pieces per 100000 lines
split -l 100000 /usr/bin/gcc
split file data.csv into pieces per 100 lines
split -l 100 data.csv
split the file data.tsv into pieces per 100 MiB preserving lines, named as data.tsv.NNN with numeric suffixes
split -C 100m -d data.tsv data.tsv.
split the file hugefile.txt into pieces per 100000 lines named as part.NNNN with numeric suffixes
split -a4 -d -l100000 hugefile.txt part.
split file input.txt into pieces per 1 line named output.NNNNN
split --lines=1 --suffix-length=5 input.txt output.
split file t.txt into pieces per 30000000 lines named as "tNN" with numeric suffixes
split --lines=30000000 --numeric-suffixes --suffix-length=2 t.txt t
split listing of the current directory into pieces per 500 lines named "outputXYZ.NNN"
ls | split -l 500 - outputXYZ.
Split the output of "ls" into files of at most 500 lines each with prefix "outputXYZ."
ls | split -l 500 - outputXYZ.
Split the output of "tar [your params]" into files of at most 500 MiB in size and use prefix "output_prefix"
tar [your params] | split -b 500m - output_prefix
split the output of the command "ping -c 25 google.com", duplicated via tee, into pieces per 100000 bytes named as "/home/user/myLogFile.logNNN"
ping -c 25 google.com | tee >(split -d -b 100000 - /home/user/myLogFile.log)
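(The >(...) is bash process substitution: tee feeds one copy of the ping output to split while the original still reaches the terminal. Without the terminal copy, a plain pipe suffices:)
ping -c 25 google.com | split -d -b 100000 - /home/user/myLogFile.log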
split result of the command "tar [your params]" into pieces per 500 MB named as "output_prefixNNN"
tar [your params] | split -b 500m - output_prefix
Split the sorted and unique lines in files "emails_*.txt" into files with at most 200 lines each with numeric suffixes of length 4
sort --unique emails_*.txt | split --numeric-suffixes --lines=200 --suffix-length=4 --verbose
Split standard input into files of at most 1000 lines each
split
Split standard input into files of at most 3400000 lines each
split -l 3400000
Split standard input into files with at most 75 lines each
split --lines=75
SSH in server 'server' as user 'user' with X11 forwarding disabled
ssh -x user@server
SSH into "$NAME" as user "${USERNAME}" using key file "${KEYDIR}/${KEY}.pem", automatically add the host to list of known hosts and execute "${COMMANDS}"
ssh -o "StrictHostKeyChecking no" -i ${KEYDIR}/${KEY}.pem ${USERNAME}@$NAME "${COMMANDS}"
SSH into "localhost" with forced pseudo-terminal allocation, execute "$heredoc", and save the output to variable "REL_DIR"
REL_DIR="$(ssh -t localhost "$heredoc")"
SSH into "myhost.com" as user "myname" with a check every 60 seconds that the server is still alive
ssh -o ServerAliveInterval=60 myname@myhost.com
ssh into default vagrant host without running "vagrant ssh" by passing the configuration parameters vagrant uses for ssh
ssh vagrant@127.0.0.1 -p 2222 -o Compression=yes -o DSAAuthentication=yes -o LogLevel=FATAL -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -i ~/.vagrant.d/less_insecure_private_key -o ForwardAgent=yes
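(A less brittle route is to let vagrant print the exact client configuration it uses and point ssh at it; a sketch, assuming the machine carries vagrant's default name "default":)
vagrant ssh-config > /tmp/vagrant-ssh-config
ssh -F /tmp/vagrant-ssh-config default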
SSH into host "$1" using key file "/path/to/ssh/secret/key" and execute command "$2"
ssh -i /path/to/ssh/secret/key $1 $2
SSH into host "server" as user "user"
ssh user@server
ssh into localhost on port 4444
ssh -p 4444 localhost
SSH into server "app1" as the current user
ssh app1
SSH into server "server.com" as user "remote_user"
ssh remote_user@server.com
SSH login in 'middle.example.org' via port 2222 as user 'deviceuser' using a private key file './device_id.pem'
ssh -i ./device_id.pem -p 2222 deviceuser@middle.example.org
SSH using parameters in $@ in master mode in the background without executing any commands and set the ControlPath to "$MASTERSOCK"
ssh -o ControlPath="$MASTERSOCK" -MNf "$@"
SSH with parameters specified in "$@" using key file "~/.ssh/gitkey_rsa"
ssh -i ~/.ssh/gitkey_rsa "$@"
Start 'top' to monitor all processes with the default settings.
top
start 2 sleep processes in the background
sleep 100 | sleep 200 &
start from the current directory, print the directory src/emacs but do not descend into it, and print the names of all other files found
find . -wholename './src/emacs' -prune , -print
start from the current directory, print the directory src/emacs but do not descend into it, and print the names of all other files found
find . -wholename './src/emacs' -prune -print -o -print
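(In both variants -prune keeps find from descending into ./src/emacs while the directory itself is still printed; the comma operator evaluates both sub-expressions and takes the second's value. -wholename is the GNU alias for -path, so an equivalent spelling is:)
find . -path './src/emacs' -prune -print -o -print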
Store the uncompressed content of the file "$file.fastq" in variable "reads"
reads=$(zcat "$file.fastq")
Store info about all mounted file systems in variable 'a', printing all sizes in powers of 1000
a=$(df -H)
Store $BUFSIZE characters of input in variable 'buffer'
read -N "$BUFSIZE" buffer
Stores system load average number in the 'proc_load_average' variable.
proc_load_average=$(w | head -1 | cut -d" " -f13 | cut -d"," -f1-2 | tr ',' '.')
Stores system load average number in the 'proc_load_average' variable.
proc_load_average=`w | head -1 | cut -d" " -f13 | cut -d"," -f1-2 | tr ',' '.'`
Strips the last two components from the path $pathname and prints the basename of the remaining path.
basename "$(dirname "$(dirname "$pathname")")"
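(Parameter expansion achieves the same without subshells; a sketch with a hypothetical intermediate variable, assuming $pathname has at least two trailing components:)
trimmed=${pathname%/*/*}   # drop the last two path components
echo "${trimmed##*/}"      # print the basename of what remains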
Strip all '\' and newlines from $output and save the result to variable 'output'
output=$(echo "$output" | tr -d '\' | tr -d '\n')
Suffix all files and folders in the current directory with "_SUF"
ls | xargs -I {} mv {} {}_SUF
switch to user username
su username
Synchronize "/home/user1/" to "wobgalaxy02:/home/user1/" including hidden files
rsync -av /home/user1/ wobgalaxy02:/home/user1/
Synchronize "/path/to/dir_a" with files in "/path/to/dir_b/" if the files are newer
rsync -rtuv /path/to/dir_b/* /path/to/dir_a
Synchronize "/path/to/dir_b" with files in "/path/to/dir_a/" if the files are newer
rsync -rtuv /path/to/dir_a/* /path/to/dir_b
Synchronize "xxx-files" to "different-stuff/xxx-files" recursively preserving metadata with a bandwidth limit of 2000 KiB/s
rsync -pogEtvr --progress --bwlimit=2000 xxx-files different-stuff
Take a file path from standard input and remove it.
xargs -I '{}' rm '{}'
Take a file path from standard input and remove it.
xargs -i rm '{}'
Take the first space-separated field of each line in file 'file.txt' as a domain name and look up its short A record.
cut -d' ' -f1 file.txt | xargs dig +short
Takes the folder path from the string '/path/to/copy/file/to/is/very/deep/there' and creates it with all parents.
mkdir -p `dirname /path/to/copy/file/to/is/very/deep/there`
Take the last slash-separated section of variable "FILE" and copy it to variable "NAME".
NAME=`basename "$FILE"`
Takes the path list from the file '.exportfiles.text' and cuts off the first two path segments and the last one.
cut -d / -f 4- .exportfiles.text | xargs -n 1 dirname
Take the section of variable "FILE" between the last slash and the following dot, if any, and store that section in variable "NAME".
NAME=`basename "$FILE" | cut -d'.' -f-1`
tar all files in the current folder, asking for user confirmation before adding each one to the archive "backup"
find . -ok tar rvf backup {} \;
Test if a file named 'file' in the current directory is more than 1 hour old
[ -n "$(find file -cmin +60)" ]
Test if a file named 'file' in the current directory is more than 1 hour old
find file -prune -cmin +60 -print | grep -q .
Time stamp every ping request to 8.8.8.8 in Unix epoch format
ping -D -n -O -i1 -W1 8.8.8.8
Traverse the filesystem just once, listing setuid files and directories into /root/suid.txt and large files into /root/big.txt.
find / \( -perm -4000 -fprintf /root/suid.txt '%#m %u %p\n' \) , \( -size +100M -fprintf /root/big.txt '%-10s %p\n' \)