Under Knoppix:
# mkdir /mnt/raid
# mount --read-only /dev/sda3 /mnt/raid
mount: /dev/sda3 already mounted or /mnt/raid busy
# mount | grep sda
# mount --read-only /dev/sdb3 /mnt/raid
mount: /dev/sdb3 already mounted or /mnt/raid busy
# mount | grep sdb
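A hedged guess at the "busy" errors: mount shows nothing, so Knoppix may
have auto-assembled an md array from the RAID members at boot, which
would keep the partitions busy. These read-only checks should confirm:

cat /proc/mdstat
mdadm --examine /dev/sda3 /dev/sdb3
dmesg | tail

If /proc/mdstat lists something like md126 or md127 holding those
partitions, the next step would be mounting that md device read-only,
or stopping it first with mdadm --stop.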
Under CentOS Rescue:
(cheerfully tells me I have no Linux partitions)
# mkdir /mnt/raid
# mount --read-only /dev/sda3 /mnt/raid
mount: you must specify the file system type
(GT: Oops - the disk I save data to came up as sda instead of sdc
after the reboot)
# mount --read-only /dev/sdb3 /mnt/raid
mount: you must specify the file system type
# mount --read-only /dev/sdc3 /mnt/raid
mount: you must specify the file system type
# mount -t ext4 --read-only /dev/sdb3 /mnt/raid
mount: special device /dev/sdb3 does not exist
# ls -l /dev/sdb*
brw-rw----. 1 root disk 8, 16 2014-02-02 14:14 /dev/sdb
# ls -l /dev/sdc*
brw-rw----. 1 root disk 8, 16 2014-02-02 14:14 /dev/sdc
brw-rw----. 1 root disk 8, 16 2014-02-02 14:14 /dev/sdc1
brw-rw----. 1 root disk 8, 16 2014-02-02 14:14 /dev/sdc2
#
I was able to mount /dev/sdc1 - it's the boot partition and has the
right stuff in it.
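The missing /dev/sdb* and /dev/sdc3 nodes suggest the kernel rejected,
or only partly read, those partition tables. A quick read-only check
(a sketch; nothing here writes to disk):

cat /proc/partitions
dmesg | grep -i gpt

The kernel normally logs a GPT complaint when it drops or falls back on
a partition table, which should say why sdb shows no partitions at all.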
# parted /dev/sdc print
Error: The backup GPT table is not at the end of the disk, as it should be.
This might mean that another operating system believes the disk is smaller.
Fix, by moving the backup to the end (and removing the old backup)?
Fix/Ignore/Cancel? c
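Since parted is unhappy with the backup table, gdisk (from the gdisk
package, if it is available in the rescue environment) can show both
GPT copies without writing anything - a read-only sketch:

gdisk -l /dev/sdb
gdisk -l /dev/sdc

gdisk -l reports whether the main and backup headers and tables look
OK or damaged, which should confirm that only the backup copy is bad.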
------------------------------
Installed a new CentOS system on a stand-alone drive, then added the
two RAID drives to it . . .
[root@localhost ~]# fdisk -l | grep dev/sd
WARNING: GPT (GUID Partition Table) detected on '/dev/sdb'! The util
fdisk doesn't support GPT. Use GNU Parted.
WARNING: GPT (GUID Partition Table) detected on '/dev/sdc'! The util
fdisk doesn't support GPT. Use GNU Parted.
Disk /dev/sda: 640.1 GB, 640135028736 bytes
/dev/sda1 * 1 64 512000 83 Linux
/dev/sda2 64 77826 624618496 8e Linux LVM
Disk /dev/sdb: 3000.6 GB, 3000591900160 bytes
/dev/sdb1 1 267350 2147483647+ ee GPT
Disk /dev/sdc: 3000.6 GB, 3000591900160 bytes
/dev/sdc1 1 267350 2147483647+ ee GPT
[root@localhost ~]#
==> RAID drives are sdb and sdc <==
[root@localhost ~]# blkid
/dev/sda1: UUID="fea610e9-5e28-4658-9d1f-8c0472530921" TYPE="ext4"
/dev/sda2: UUID="ufSFAS-avCS-eLTX-4wbN-3QSI-DIqC-o2bfiC" TYPE="LVM2_member"
/dev/sdc1: UUID="4842ad37-e472-4a11-aa53-16a61edf03c4" TYPE="ext4"
/dev/sdc2: UUID="2608f01c-6407-4c5b-9be2-3d820123de78" TYPE="swap"
/dev/mapper/VolGroup-lv_root: UUID="d2c27a2b-2007-4839-8350-166df0637996" TYPE="ext4"
/dev/mapper/VolGroup-lv_swap: UUID="b00df2e1-5b82-4b4d-b3e9-a93a5e4078ed" TYPE="swap"
/dev/mapper/VolGroup-lv_home: UUID="945e33e7-383a-4159-8b5c-ce76f2817080" TYPE="ext4"
/dev/sdd1: LABEL="My Book" UUID="A2CA0AEBCA0ABC13" TYPE="ntfs"
[root@localhost ~]#
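blkid sees nothing on sdb and no sdc3, which matches the kernel not
having created those device nodes. Asking the kernel to re-read the
partition tables is harmless and worth a try now, and again after any
GPT repair - a sketch:

partprobe /dev/sdb /dev/sdc
cat /proc/partitions

If sdb1-3 and sdc3 show up afterwards, blkid and the read-only mounts
can be retried.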
[root@localhost ~]# mount --read-only /dev/sdb3 /mnt/raid
mount: you must specify the filesystem type
[root@localhost ~]# mount --read-only -t ext4 /dev/sdb3 /mnt/raid
mount: special device /dev/sdb3 does not exist
[root@localhost ~]#
[root@localhost ~]# parted /dev/sdb print
Error: Invalid argument during seek for read on /dev/sdb
Retry/Ignore/Cancel? i
Error: The backup GPT table is corrupt, but the primary appears OK, so that will be used.
OK/Cancel? o
Backtrace has 8 calls on stack:
8: /lib64/libparted-2.1.so.0(ped_assert+0x31) [0x7fe33d585151]
7: /lib64/libparted-2.1.so.0(+0x42b2d) [0x7fe33d5b5b2d]
6: /lib64/libparted-2.1.so.0(ped_disk_new+0x75) [0x7fe33d58c305]
5: parted() [0x40692c]
4: parted(non_interactive_mode+0x8c) [0x40c73c]
3: parted(main+0x63) [0x40aaa3]
2: /lib64/libc.so.6(__libc_start_main+0xfd) [0x7fe33cd99d1d]
1: parted() [0x404f49]
You found a bug in GNU Parted!
---------------
Attempting scary RAID stuff . . .
[root@localhost ~]# mdadm --assemble --scan
mdadm: No arrays found in config file or automatically
[root@localhost ~]#
[root@localhost ~]# mdadm --create /dev/md0 -n2 -l1 /dev/sdb3 missing
mdadm: cannot open /dev/sdb3: No such file or directory
[root@localhost ~]# mdadm --create /dev/md0 -n2 -l1 /dev/sdb1 missing
mdadm: cannot open /dev/sdb1: No such file or directory
[root@localhost ~]# mdadm --create /dev/md0 -n2 -l1 /dev/sdc3 missing
mdadm: cannot open /dev/sdc3: No such file or directory
[root@localhost ~]# mdadm --create /dev/md0 -n2 -l1 /dev/sdc1 missing
mdadm: /dev/sdc1 appears to contain an ext2fs file system
size=204800K mtime=Sun Dec 29 10:32:43 2013
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@localhost ~]# mount /dev/md0 /mnt/raid
mount: you must specify the filesystem type
[root@localhost ~]# mount -t ext4 /dev/md0 /mnt/raid
mount: wrong fs type, bad option, bad superblock on /dev/md0,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so
[root@localhost ~]#
But this is the wrong partition - sdc1 is the 200MB boot partition.
Still, it proves the command works :)
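Since that test array landed on the boot partition by mistake, it is
probably worth undoing it before anything else, so the superblock that
--create just wrote doesn't cause confusion later. A sketch - worth
double-checking the device name before zeroing anything:

mdadm --stop /dev/md0
mdadm --zero-superblock /dev/sdc1

--zero-superblock removes only the md metadata that --create wrote, so
the partition is no longer tagged as an array member.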
---------------------
It seems like I may be able to mount /dev/sdc3 if I can correct the
size problem the GPT complains about.
Thoughts?
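One possible route for that size/backup-table problem, sketched from
gdisk's recovery and transformation menu and untried on these disks
(the menu letters are worth confirming with '?' inside gdisk before
writing anything):

gdisk /dev/sdc
  r    open the recovery and transformation menu
  v    verify the disk and see what gdisk thinks is wrong
  d    use the main GPT header to rebuild the backup
  e    load the main partition table
  p    print the table and sanity-check it
  w    write, only if the printed table looks right

The same would then be repeated for /dev/sdb. If the tables come back
clean, the kernel should create /dev/sdb1-3 and /dev/sdc3 again after a
partprobe or a reboot, and the read-only mount of sdc3 can be retried.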
Regards,
George Toft
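One more avenue, for the image.dd mentioned in the first message quoted
below: a raw image file is not a block device, which is why building an
array from it failed, but losetup can wrap it in one. A sketch - the
path is a placeholder:

losetup /dev/loop0 /path/to/image.dd
mdadm --examine /dev/loop0
mount -o ro /dev/loop0 /mnt/raid

The direct mount only works if the filesystem starts at offset 0 of the
imaged partition (0.90 or 1.0 metadata, which sit at the end); with 1.2
metadata the data is offset, and assembling a degraded array from the
loop device would be needed instead.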
On 2/2/2014 10:47 AM, Brian Cluff wrote:
> If it's a RAID 1 you shouldn't need to assemble it to get your data,
> just mount the raid partition directly read only and copy your data
> off to somewhere else.
>
> You should be able to do something like:
>
> mount --read-only /dev/sdb1 /mnt
> or if the above one doesn't work:
> mount --read-only /dev/sdc1 /mnt
>
> The other possibility you could try sounds terrifying but it works...
> Just create a new array:
> mdadm --create /dev/md0 -n2 -l1 /dev/sdb1 /dev/sdc1
>
> When you create an array all it does, for the most part, is write a
> superblock at the end of the partition so that it can later identify
> the associated drives and be able to automatically put them back
> together. The data area itself is unaffected, so it should be safe to
> just create a new array (just don't mkfs it afterwards). Creating a
> new array will change the RAID's UUID and such, so you won't be able
> to just put it back into service without first creating a new
> mdadm.conf and running mkinitrd but otherwise it should just mount up
> and go... as long as the data isn't completely corrupted.
>
> Triple-check that the partitions are absolutely correct or it will
> destroy your data when it starts to resync the array upon creation.
>
> You could also give yourself 2 chances to get your data back and make
> 2 RAID1 arrays out of your 2 raid drives by doing this:
> mdadm --create /dev/md0 -n2 -l1 /dev/sdb1 missing
> mdadm --create /dev/md1 -n2 -l1 /dev/sdc1 missing
>
> That will give you /dev/md0 and /dev/md1 which you could then mount up
> and hopefully copy all your data off.
>
> I hope this helps,
> Brian Cluff
>
> On 02/02/2014 09:25 AM, George Toft wrote:
>> I've spent over 15 hours on this (google . . . head . . . desk . . .
>> repeat).
>>
>> I need to recover the data off of one of these hard drives.
>>
>> Background
>> Two 3TB hard drives in a RAID 1 mirror, working fine for months. OS:
>> CentOS 6.5
>> Woke up a few days ago to a dead system - it looks like the
>> motherboard failed, and when it failed it appears to have corrupted
>> the RAID partition (supposition - see problems below). I moved the
>> drives to another system and it starts to boot, then the kernel panics.
>>
>> Partitions
>> part 1 - /boot
>> part 2 - swap
>> part 3 - RAID
>>
>> I think the RAID partition has just one filesystem (/).
>>
>>
>> What I've done:
>> Rescue mode: Boots, unable to assemble raid set:
>>
>> # fdisk -l | egrep "GPT|dev"
>> WARNING: GPT (GUID Partition Table) detected on '/dev/sdb'! The util
>> fdisk doesn't support GPT. Use GNU Parted.
>>
>> WARNING: GPT (GUID Partition Table) detected on '/dev/sdc'! The util
>> fdisk doesn't support GPT. Use GNU Parted.
>>
>> Disk /dev/sda: 80.0 GB, 80025280000
>> Disk /dev/sdb: 3000.6 GB, 3000591900160 bytes
>> /dev/sdb1               1      267350  2147483647+  ee  GPT
>> Disk /dev/sdc: 3000.6 GB, 3000591900160 bytes
>> /dev/sdc1               1      267350  2147483647+  ee  GPT
>>
>> # mdadm --assemble --run /dev/md0 /dev/sdb
>> mdadm: Cannot assemble mbr metadata on /dev/sdb
>> mdadm: /dev/sdb has no superblock - assembly aborted
>>
>> # mdadm --assemble --run /dev/md0 /dev/sdb1
>> mdadm: cannot open device /dev/sdb1: No such file or directory
>> mdadm: /dev/sdb has no superblock - assembly aborted
>>
>>
>> parted tells me I've found a bug and gives me directions to report it.
>>
>> -----------
>>
>> Booted Knoppix and ran disktest. I can copy the RAID partition to
>> another drive as a disk image and I end up with image.dd. When I try to
>> build an array out of it, I get an error: Not a block device.
>>
>> Tried commercial RAID recovery software (Disk Internals) - it hung after
>> identifying 2.445 million files.
>>
>>
>> -------------
>>
>> Ideas on what to do next?
>>
>> Is anyone here up for a challenge? Anyone need beer money? I need the
>> data recovered, and will pay :)
>>
>> All help is appreciated :)
>>
>
---------------------------------------------------
PLUG-discuss mailing list - PLUG-discuss@lists.phxlinux.org
To subscribe, unsubscribe, or to change your mail settings:
http://lists.phxlinux.org/mailman/listinfo/plug-discuss