
SQL Server – Basic SQL Commands for the day-to-day SQL DBA


===== Number of SQL Connections & login name ====

SELECT
DB_NAME(dbid) as DBName,
COUNT(dbid) as NumberOfConnections,
loginame as LoginName
FROM
sys.sysprocesses
WHERE
dbid > 0
GROUP BY
dbid, loginame

===== Check for database compatibility =====

ALTER DATABASE DB_Caching_SearchFacets SET COMPATIBILITY_LEVEL = 110
SELECT compatibility_level FROM sys.databases WHERE name = 'DBName'

-- Enabling SQL Agent notification - SQL Server 2005, 2008, 2008 R2

USE [msdb]
EXEC msdb.dbo.sp_set_sqlagent_properties
@email_save_in_sent_folder=1

EXEC master.dbo.xp_instance_regwrite
N'HKEY_LOCAL_MACHINE'
, N'SOFTWARE\Microsoft\MSSQLServer\SQLServerAgent'
, N'UseDatabaseMail'
, N'REG_DWORD'
, 1

EXEC master.dbo.xp_instance_regwrite
N'HKEY_LOCAL_MACHINE'
, N'SOFTWARE\Microsoft\MSSQLServer\SQLServerAgent'
, N'DatabaseMailProfile'
, N'REG_SZ'
, N'01_mail'

=========================================================

/*** Run this query and save the full results - who is doing what at this moment ***/

SELECT r.session_id
,r.scheduler_id
,r.status
,s.text
,r.start_time
,r.blocking_session_id
,r.wait_type
,r.wait_time
,r.last_wait_type
,r.wait_resource
,r.open_transaction_count
,r.cpu_time
,r.total_elapsed_time
,r.reads
,r.writes
,r.logical_reads
,r.percent_complete
,r.estimated_completion_time
,wt.*
FROM sys.dm_exec_requests r
LEFT OUTER JOIN sys.dm_os_waiting_tasks wt ON r.session_id = wt.session_id
OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) s
ORDER BY r.session_id ;

=========================================================

=== Check orphaned users for all DBs ====

CREATE TABLE ##ORPHANUSER
(
DBNAME VARCHAR(100),
USERNAME VARCHAR(100),
CREATEDATE VARCHAR(100),
USERTYPE VARCHAR(100)
)

EXEC sp_MSforeachdb ' USE [?]
INSERT INTO ##ORPHANUSER
SELECT DB_NAME() DBNAME, NAME, CREATEDATE,
(CASE
WHEN ISNTGROUP = 0 AND ISNTUSER = 0 THEN ''SQL LOGIN''
WHEN ISNTGROUP = 1 THEN ''NT GROUP''
WHEN ISNTGROUP = 0 AND ISNTUSER = 1 THEN ''NT LOGIN''
END) [LOGIN TYPE] FROM sys.sysusers
WHERE SID IS NOT NULL AND SID <> 0X0 AND ISLOGIN = 1 AND
SID NOT IN (SELECT SID FROM sys.syslogins)'

SELECT * FROM ##ORPHANUSER

DROP TABLE ##ORPHANUSER

=========================================================

-- When will the Backup Finish / Backup Status & Finish --

SELECT command, percent_complete,
'elapsed' = total_elapsed_time / 60000.0,
'remaining' = estimated_completion_time / 60000.0
FROM sys.dm_exec_requests
WHERE command like 'BACKUP%'

=========================================================
-- Backup details for a single database --

SELECT TOP 50
    s.database_name,
    m.physical_device_name,
    CAST(DATEDIFF(second, s.backup_start_date, s.backup_finish_date) AS VARCHAR(100)) + ' Seconds' AS TimeTaken,
    s.backup_start_date,
    CASE s.[type]
        WHEN 'D' THEN 'Full'
        WHEN 'I' THEN 'Differential'
        WHEN 'L' THEN 'Transaction Log'
    END AS BackupType,
    s.server_name,
    s.recovery_model
FROM msdb.dbo.backupset s
INNER JOIN msdb.dbo.backupmediafamily m ON s.media_set_id = m.media_set_id
WHERE s.database_name = DB_NAME() -- Remove this line to list backups for all databases
ORDER BY backup_start_date DESC

=========================================================

--== Determine Locks held in database ==--

SELECT request_session_id AS [spid], DB_NAME(resource_database_id) AS [dbname],
CASE WHEN resource_type = 'OBJECT' THEN OBJECT_NAME(resource_associated_entity_id)
WHEN resource_associated_entity_id = 0 THEN 'n/a'
ELSE OBJECT_NAME(p.object_id) END AS [entity_name], index_id, resource_type AS [resource],
resource_description AS [description], request_mode AS mode, request_status AS [status]
FROM sys.dm_tran_locks t
LEFT OUTER JOIN sys.partitions p ON p.partition_id = t.resource_associated_entity_id
WHERE t.resource_type <> 'DATABASE';

=========================================================

/******* Find the SESSIONS that are currently blocking ******/

CREATE TABLE #who2
(SPID int NOT NULL,
[Status] varchar(100),
Login varchar(50) ,
HostName varchar(100),
[BlkBy] varchar(20) ,
DBName varchar(55) ,
Command varchar(256),
CPUTime varchar(20),
DiskIO varchar(20),
LastBatch varchar(100),
ProgramName varchar(100),
SPID2 varchar(20),
RequestID varchar(20)
)
INSERT INTO #who2
EXEC master.sys.sp_who2 active
SELECT r.session_id AS SPID
,(SELECT MAX(HostName) FROM #who2 WHERE SPID = r.session_id) AS Server
,(SELECT MAX(sysprocesses.hostprocess) FROM master.dbo.sysprocesses (NOLOCK) WHERE sysprocesses.spid = r.session_id) AS TaskManagerPID
,(SELECT MAX(Login) FROM #who2 WHERE SPID = r.session_id) AS Login
--,p.cpu AS CPUTime, p.physical_io AS DiskIO
,(SELECT SUM(CAST(CPUTime AS bigint)) FROM #who2 WHERE SPID = r.session_id) AS CPUTime
,(SELECT SUM(CAST(DiskIO AS int)) FROM #who2 WHERE SPID = r.session_id) AS DiskIO
,CONVERT(VARCHAR(16), start_time, 120) AS start_time
,(SELECT MAX(DBName) FROM #who2 WHERE SPID = r.session_id) AS DBName
--,(SELECT MAX(Command) FROM #who2 WHERE SPID = r.session_id) AS Command
--,qt.text, r.statement_start_offset, r.statement_end_offset
,SUBSTRING(qt.text, r.statement_start_offset/2,
    (CASE WHEN r.statement_end_offset = -1 THEN LEN(CONVERT(nvarchar(max), qt.text)) * 2
          ELSE r.statement_end_offset END - r.statement_start_offset)/2 + 1) AS query_text -- this is the statement executing right now
--,qt.dbid
--,qt.objectid
,blocking_session_id AS BlockingSPID
--,SUBSTRING(CONVERT(char(23), DATEADD(ms, r.cpu_time, 0), 121), 12, 8) AS cpu_time
,RIGHT('0'+CONVERT(VARCHAR,(DATEDIFF(ss,start_time,GETDATE()))/(3600)),2) + ':' + RIGHT('0'+CONVERT(VARCHAR,(DATEDIFF(ss,start_time,GETDATE()))%(3600)/60),2) + ':' + RIGHT('0'+CONVERT(VARCHAR,(DATEDIFF(ss,start_time,GETDATE()))%60),2) AS elapsed_time
,SUBSTRING(CONVERT(VARCHAR,CAST(r.reads AS MONEY),1),1,LEN(CONVERT(VARCHAR,CAST(r.reads AS MONEY),1))-3) AS reads
,SUBSTRING(CONVERT(VARCHAR,CAST(r.writes AS MONEY),1),1,LEN(CONVERT(VARCHAR,CAST(r.writes AS MONEY),1))-3) AS writes
,SUBSTRING(CONVERT(VARCHAR,CAST(r.logical_reads AS MONEY),1),1,LEN(CONVERT(VARCHAR,CAST(r.logical_reads AS MONEY),1))-3) AS logical_reads
,(SELECT MAX(ProgramName) FROM #who2 WHERE SPID = r.session_id) AS ProgramName
,wait_type, wait_time --,wait_resource
,r.status
--,*
--,r.scheduler_id
FROM sys.dm_exec_requests r (NOLOCK)
--INNER JOIN master.dbo.sysprocesses p WITH (NOLOCK)
--ON p.spid = r.session_id
CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt
WHERE r.session_id > 50
AND qt.text NOT LIKE '%SELECT r.session_id%'
--AND r.session_id IN (SELECT SPID FROM #who2 WHERE Login LIKE '%siitusr%')
ORDER BY 3, 1
go
drop table #who2
go

=========================================================

-- KILL all the connections for a database --

Use Master
Go

Declare @dbname sysname

Set @dbname = 'databaseName'

Declare @spid int
Select @spid = min(spid) from master.dbo.sysprocesses
where dbid = db_id(@dbname)
While @spid Is Not Null
Begin
Execute ('Kill ' + CAST(@spid AS varchar(10)))
Select @spid = min(spid) from master.dbo.sysprocesses
where dbid = db_id(@dbname) and spid > @spid
End

-- Simple script to close all connections to a MSSQL database --

USE master;
GO
ALTER DATABASE dbName
SET SINGLE_USER
WITH ROLLBACK IMMEDIATE;
GO
ALTER DATABASE dbName
SET MULTI_USER;
GO

=========================================================

-- Get all DB sizes in MB --

SELECT d.name,
ROUND(SUM(mf.size) * 8 / 1024, 0) Size_MBs
FROM sys.master_files mf
INNER JOIN sys.databases d ON d.database_id = mf.database_id
WHERE d.database_id > 4 -- Skip system databases
GROUP BY d.name
ORDER BY d.name

=========================================================

-----------------------------------------------------------------------
-----------------------------------------------------------------------

--1. To find out which recovery model your database is using

SELECT DATABASEPROPERTYEX('database', 'recovery')

------------------------------------------------------------------------------

--2. To change the recovery option for a database

ALTER DATABASE [database_name] SET RECOVERY { FULL | SIMPLE | BULK_LOGGED }
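
For instance, switching a database to the full recovery model (SalesDB below is just a placeholder database name):

ALTER DATABASE SalesDB SET RECOVERY FULL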

------------------------------------------------------------------------------
--3. To know the compatibility level of all databases

select * from sys.databases

------------------------------------------------------------------------------

--4. To know the compatibility level of a particular database

select * from sys.databases where name = 'database name'

------------------------------------------------------------------------------

--5. To know the space availability in a log file

dbcc sqlperf(logspace)

------------------------------------------------------------------------------

--6. To know the space availability in a data file

sp_spaceused
------------------------------------------------------------------------------

--7. To know the details of a particular database (db name, filename, filegroup, size)

sp_helpfile
------------------------------------------------------------------------------

--8. To know the details of all databases (db name, size, owner, date created, status, compatibility level)

sp_helpdb

------------------------------------------------------------------------------

--9. To know the details of database files (name, filename)

select * from sys.sysaltfiles

------------------------------------------------------------------------------

--10. To know the details of sessions (db name, login name, status, command)

sp_who2

------------------------------------------------------------------------------

--11. Command for FULL BACKUP

backup database [database_name] to disk = 'path.bak'

------------------------------------------------------------------------------

--12. Command for differential backup

backup database aaa to disk = 'path.bak' with differential

------------------------------------------------------------------------------

--13. Command for transaction log backup

backup log aaa to disk = 'd:\aaalog.trn'

------------------------------------------------------------------------------

--14. Command for file & filegroup backup

backup database aaa file = 'aaa', filegroup = 'primary' to disk = 'd:\aaafile.bak' with noformat

------------------------------------------------------------------------------

--15. Command for tail log backup

backup log aaa to disk = 'd:\aaatail.trn' with no_truncate

------------------------------------------------------------------------------

--16. Command for mirrored backup

backup database aaa to disk = 'd:\aaamirror1.bak' mirror to disk = 'd:\aaamirror2.bak' with format

------------------------------------------------------------------------------
--17. Command for striped backup

backup database aaa to disk = 'd:\aaastriped_part1.bak', disk = 'd:\aaastriped_part2.bak'

------------------------------------------------------------------------------
--18. Command for partial backup

backup database aaa read_write_filegroups to disk = 'd:\aaapartial.bak'

------------------------------------------------------------------------------

--19. Command for partial differential backup

backup database aaa read_write_filegroups to disk = 'd:\aaapartialdiff.bak' with differential

------------------------------------------------------------------------------

--20. Command for copy-only backup (data files)

backup database aaa to disk = 'd:\aaa_copyonly.bak' with copy_only

------------------------------------------------------------------------------

--21. Command for copy-only backup (log files)

backup log aaa to disk = 'd:\aaa_copyonly_log.bak' with copy_only

------------------------------------------------------------------------------

--22. To know the server role permissions

exec sp_srvrolepermission
------------------------------------------------------------------------------

--23. To know a particular server role's permissions

exec sp_srvrolepermission sysadmin

------------------------------------------------------------------------------

--24. To create the built-in administrators login

create login [BUILTIN\Administrators] from windows

------------------------------------------------------------------------------

--25. To know the details of endpoints (protocols)

select * from sys.endpoints

------------------------------------------------------------------------------

--26. To count the rows in a table

select count(*) from tablename

------------------------------------------------------------------------------

--27. Command for restore

restore database dbname from disk = 'path' with norecovery, replace

------------------------------------------------------------------------------

--28. Command to check the validity of a backup

restore verifyonly from disk = 'path'

------------------------------------------------------------------------------

--29. Command to read the header of a backup

restore headeronly from disk = 'path'

------------------------------------------------------------------------------

--30. Command to list the contents of a backup

restore filelistonly from disk = 'path'

------------------------------------------------------------------------------

--31. Command to restore a full backup to a different location

restore DATABASE [NEW_DATABASE_NAME] FROM DISK = 'F:\pathFULL.bak' WITH FILE = 1,
MOVE 'OLD_FILE_NAME_mdf' TO 'C:\NEW_FILE_NAME….mdf',
MOVE 'OLD_FILE_NAME_ldf' TO 'C:\NEW_FILE_NAME…..ldf';
GO

------------------------------------------------------------------------------

--32. Commands to take a database offline

alter database dbname set offline with rollback immediate

-- the above option kills all current transactions & puts the database immediately into the offline state

alter database dbname set offline with rollback after 5 seconds

-- the above option kills the transactions after 5 seconds & then puts the db into the offline state

alter database dbname set offline with no_wait

-- the above option can be used when we don't want the command to be blocked

------------------------------------------------------------------------------

--33. Command to bring the database back online

alter database dbname set online

------------------------------------------------------------------------------

--34. Command to back up a database with password protection

BACKUP DATABASE [DATABASE NAME] TO DISK = 'C:\path.BAK' WITH MEDIAPASSWORD = 'password'

------------------------------------------------------------------------------

--35. Restoring a database specifying the password

RESTORE DATABASE [DATABASE NAME] FROM DISK = 'C:\path.BAK' WITH MEDIAPASSWORD = 'password'

------------------------------------------------------------------------------

--36. Command to create a database

create database dbname

------------------------------------------------------------------------------

--37. Command to change the collation settings

alter database dbname collate sql_latin1_general_cp1_cs_as

------------------------------------------------------------------------------

--38. Command to set the database in single-user mode

alter database dbname set single_user

------------------------------------------------------------------------------

--39. Command to create a filegroup

alter database dbname add filegroup [filegroup_name]

------------------------------------------------------------------------------

--40. Command to change the logical name of a file

alter database dbname modify file (name = 'old filename', newname = 'new filename')

------------------------------------------------------------------------------

--41. Command to disable autogrowth of a file

alter database dbname modify file (name = 'filename', filegrowth = 0)

------------------------------------------------------------------------------

--42. Command to resize a file

alter database dbname modify file (name = 'filename', size = 100MB) -- 100MB stands in for the new size

------------------------------------------------------------------------------

--43. Command to make a database read-only

alter database dbname set read_only

------------------------------------------------------------------------------

--44. Command to remove a file from a database

alter database dbname remove file filename

------------------------------------------------------------------------------

--45. Command to change the path of a file

alter database dbname modify file (name = 'logical filename', filename = 'path.mdf')
alter database dbname modify file (name = 'logical filename', filename = 'path.ldf')

------------------------------------------------------------------------------

--46. Command to put a database in restricted mode

alter database dbname set restricted_user

------------------------------------------------------------------------------

--48. Command to know the version and edition

select @@version
select serverproperty('edition')

------------------------------------------------------------------------------

--49. Command to know how much space is available in the log files

dbcc sqlperf(logspace)

------------------------------------------------------------------------------

--50. Command to know how much space is available in the data file

sp_spaceused

------------------------------------------------------------------------------

--51. Command to know database names and sizes

sp_databases

------------------------------------------------------------------------------

--52. Command to know the database id

select db_id('databasename')

------------------------------------------------------------------------------

--53. Command for point-in-time recovery

restore log dbname from disk = 'path' with stopat = 'yyyy-mm-dd hh:mm:ss'
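
A point-in-time restore only works as the final step of a restore sequence. A minimal sketch (dbname and the paths are placeholders):

-- restore the full backup without recovery, then roll the log forward to the chosen time
restore database dbname from disk = 'path_full.bak' with norecovery
restore log dbname from disk = 'path_log.trn' with stopat = 'yyyy-mm-dd hh:mm:ss', recovery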

------------------------------------------------------------------------------

--54. Command to delete the backup history for a database

use msdb
go
exec sp_delete_database_backuphistory 'dbname'

------------------------------------------------------------------------------

--55. Command to delete backup history older than a date

use msdb
go
exec sp_delete_backuphistory @oldest_date = 'date'

------------------------------------------------------------------------------

--56. Command to know job information

use msdb
select * from sysjobs

------------------------------------------------------------------------------

--57. Command to know whether a database is really in suspect mode or not

select databasepropertyex('dbname', 'status')
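
Alternatively, on SQL Server 2005 and later, a quick way to see the state of every database at once:

select name, state_desc from sys.databases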

------------------------------------------------------------------------------

--58. Shrink TempDB log and data files

USE [tempdb]
-- Check database file size before shrink
SELECT name, size/128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS int)/128.0 AS AvailableSpaceInMB
FROM sys.database_files

-- Shrink tempdb log file (.ldf)
DBCC SHRINKFILE(templog, 1) -- on SQL 2005 and later, run this line on its own
BACKUP LOG tempdb WITH TRUNCATE_ONLY -- TRUNCATE_ONLY is not available after SQL 2005
DBCC SHRINKFILE(templog, 1)

-- Shrink tempdb data file (.mdf)
DBCC SHRINKFILE(tempdev, 1) -- on SQL 2005 and later, run this line on its own
BACKUP LOG tempdb WITH TRUNCATE_ONLY -- TRUNCATE_ONLY is not available after SQL 2005
DBCC SHRINKFILE(tempdev, 1)

-- Check database file size after shrink
SELECT name, size/128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS int)/128.0 AS AvailableSpaceInMB
FROM sys.database_files

 


SQL Server Backups & Restores


Introduction – Backups and Restores in SQL Server

This article is based on the author's real-life experience with database backup and restore during consultancy work for various organizations. We will go over the following important concepts of database backup and restore.

  1. Conventional Backup and Restore
  2. Split File Backup and Restore
  3. Mirror File Backup
  4. Understanding FORMAT Clause
  5. Miscellaneous details about Backup and Restore

Note:

Before running the examples, make sure that you have the required folders created on your drive. The backup folders must exist before SQL Server can create backup files in them; a script to create them follows the list below.

In our example, we will require the following folders:

  • C:\Backup\SingleFile
  • C:\Backup\MultiFile
  • C:\Backup\MirrorFile
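
If you prefer to create the folders from T-SQL rather than Windows Explorer, a minimal sketch (xp_create_subdir is an undocumented extended procedure in master; adjust paths as needed):

-- create the three backup folders used throughout the examples
EXEC master.dbo.xp_create_subdir 'C:\Backup\SingleFile'
EXEC master.dbo.xp_create_subdir 'C:\Backup\MultiFile'
EXEC master.dbo.xp_create_subdir 'C:\Backup\MirrorFile'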

Conventional and Split File Backup and Restore

While working on a recent project, I had to take a backup of a 14 GB database. My hard drive lacked sufficient space at that moment. Fortunately, I had two 8 GB USB drives with me. The question was how to take the backup in two roughly equal parts of 7 GB each, so that one part would fit on each USB drive. A conventional backup produces one large file; however, the SQL Server BACKUP command can also split a backup into two or more parts.

Let us see an example of a conventional one-file backup using the AdventureWorks database.

BACKUP DATABASE AdventureWorks

TO DISK = 'C:\Backup\SingleFile\AdventureWorks.bak'

GO

Here, the backup is taken in a single file.

Now, let us see how we can split one database backup into multiple files. This method is very similar to taking a single-file backup; by simply adding additional DISK options we can split the backup across several files.

BACKUP DATABASE AdventureWorks

TO  DISK = 'C:\Backup\MultiFile\AdventureWorks1.bak',

DISK = 'C:\Backup\MultiFile\AdventureWorks2.bak',

DISK = 'C:\Backup\MultiFile\AdventureWorks3.bak'

GO

In the previous example, the backup is split into three files, each roughly one third of the size of the single-file backup.

Restoring a backup from a single-file backup is quite easy. Let us go over an example where we restore the AdventureWorks database from a single backup file.

RESTORE DATABASE AdventureWorks

FROM  DISK = 'C:\Backup\SingleFile\AdventureWorks.bak'

GO

Running the above script will give a successful message.

Now let us see an example where we restore a database from a split file. This method is very similar to restoring a database from a single file; just add an additional DISK option.

RESTORE DATABASE [AdventureWorks]

FROM    DISK = N'C:\Backup\MultiFile\AdventureWorks1.bak',

DISK = N'C:\Backup\MultiFile\AdventureWorks2.bak',

DISK = N'C:\Backup\MultiFile\AdventureWorks3.bak'

GO

Running the above script will give a successful message.

Make sure that the database is not in use while restoring it; otherwise the restore will fail with a "database in use" error. If that error occurs, close all the connections and re-attempt the restore; one way to force the connections closed is sketched below.
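
A minimal sketch (assuming exclusive access to AdventureWorks is acceptable): force the database into single-user mode, restore, then reopen it.

USE master
GO
ALTER DATABASE AdventureWorks SET SINGLE_USER WITH ROLLBACK IMMEDIATE
RESTORE DATABASE AdventureWorks
FROM DISK = 'C:\Backup\SingleFile\AdventureWorks.bak'
WITH REPLACE -- REPLACE skips the tail-log check; use with care
ALTER DATABASE AdventureWorks SET MULTI_USER
GO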

Mirror Backup of the file

It is quite a common practice to create an exact copy of the backup and store it in several places, to guard against any catastrophe affecting the place where the backup is stored. Once a full backup completes, DBAs generally copy it to another location on the network using third-party tools like robocopy or native commands like xcopy.

In SQL Server 2005 and later versions, the MIRROR TO clause makes copies of the database backup to different locations while taking the original backup. Up to three additional locations can be specified with the MIRROR TO clause.

A mirrored backup can be written to the local computer as well as to locations on the local network. Let us now see two examples of mirror backup.

Example 1. Single File Backup to Multiple Locations using MIRROR TO

BACKUP DATABASE AdventureWorks

TO DISK = 'C:\Backup\SingleFile\AdventureWorks.bak'

MIRROR TO DISK = 'C:\Backup\MirrorFile\AdventureWorks.bak'

WITH FORMAT

GO

If this command is being run for the first time, it is mandatory to use the WITH FORMAT clause; for subsequent runs it is not required. WITH FORMAT reinitializes the backup media; an alternative for later runs is sketched below.
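
For later runs, a hedged sketch that appends to the already-formatted mirrored media set instead of overwriting it (assuming the media set from Example 1 exists):

BACKUP DATABASE AdventureWorks
TO DISK = 'C:\Backup\SingleFile\AdventureWorks.bak'
MIRROR TO DISK = 'C:\Backup\MirrorFile\AdventureWorks.bak'
WITH NOINIT -- append a new backup set to the existing mirrored media set
GO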

When checked, the folders 'SingleFile' and 'MirrorFile' contain exactly the same backup files. As mentioned earlier, up to four copies in total (the original plus three mirrors) can be written.

Example 2. Split File Backup to Multiple Locations using Mirror

We saw earlier that a large database backup can be split across multiple files. SQL Server's mirror functionality also supports mirroring split backups.

 

BACKUP DATABASE AdventureWorks

TO  DISK = 'C:\Backup\MultiFile\AdventureWorks1.bak',

DISK = 'C:\Backup\MultiFile\AdventureWorks2.bak',

DISK = 'C:\Backup\MultiFile\AdventureWorks3.bak'

MIRROR TO   DISK = 'C:\Backup\MirrorFile\AdventureWorks1.bak',

DISK = 'C:\Backup\MirrorFile\AdventureWorks2.bak',

DISK = 'C:\Backup\MirrorFile\AdventureWorks3.bak'

WITH FORMAT

GO

All the mirror sets will need the same number of DISK clauses as the original backup media.

Mirrored database backup can be restored using the same method as the original backup. Mirrored backup is in fact an exact replica of the original backup.

Understanding the FORMAT Clause

The FORMAT clause is used to reinitialize a backup media set. Although it is a very useful clause, it should be used with caution: when the clause is used, it erases everything present on the backup media. I have noticed that some DBAs are confused when taking a backup on the local disk where SQL Server is installed; they have a misconception that if the FORMAT option is used, it will erase the complete disk, including the SQL Server installation. In fact, the SQL Server FORMAT clause is quite different from an OS format: its effect is limited to the backup files specified in the DISK clause.

In our example, when the FORMAT clause is specified, it will reinitialize only the backup files under folders like C:\Backup\MultiFile\ or C:\Backup\SingleFile.

**************************************************

Related Errors

Error 3010

Invalid backup mirror specification. All mirrors must have the same number of members.

This error can show up while taking a mirrored database backup along with a regular backup, when the number of DISK and MIRROR TO DISK clauses does not match.


To fix the error, match the members of DISK and MIRROR TO DISK to each other.

*********************************************************

Error 3215

Use WITH FORMAT to create a new mirrored backup set

This error can spring up when a new backup is initiated and the existing media header needs to be reset on all devices in the backup media set. If there is already a backup on the media, SQL Server raises this error to prevent the backup from being overwritten. To fix this error, use WITH FORMAT as shown in the earlier example.

Script to find out all the Orphan Users on the SQL Server


-- We can use the below script to find the orphan users on the server

IF EXISTS (SELECT * FROM sysobjects
           WHERE id = OBJECT_ID(N'[dbo].[usp_ShowOrphanUsers]')
             AND OBJECTPROPERTY(id, N'IsProcedure') = 1)
DROP PROCEDURE [dbo].[usp_ShowOrphanUsers]
GO

CREATE PROC dbo.usp_ShowOrphanUsers
AS
BEGIN
CREATE TABLE #Results
(
[Database Name] sysname,
[Orphaned User] sysname,
[Type User] sysname
)

SET NOCOUNT ON

DECLARE @DBName sysname, @Qry NVARCHAR(4000)

SET @Qry = ''
SET @DBName = ''

WHILE @DBName IS NOT NULL
BEGIN
SET @DBName =
(
SELECT MIN(name)
FROM master..sysdatabases
WHERE name NOT IN
('master', 'model', 'tempdb', 'msdb', 'distribution', 'pubs', 'northwind', 'dba_database')
AND DATABASEPROPERTY(name, 'IsOffline') = 0
AND DATABASEPROPERTY(name, 'IsSuspect') = 0
AND name > @DBName
)

IF @DBName IS NULL BREAK

SET @Qry = ' SELECT ''' + @DBName + ''' AS [Database Name],
CAST(name AS sysname) COLLATE Latin1_General_CI_AS AS [Orphaned User],
[Type User] =
CASE isntuser
WHEN ''0'' THEN ''SQL User''
WHEN ''1'' THEN ''NT User''
ELSE ''Not Available''
END
FROM ' + QUOTENAME(@DBName) + '..sysusers su
WHERE su.islogin = 1
AND su.name NOT IN (''INFORMATION_SCHEMA'', ''sys'', ''guest'', ''dbo'', ''system_function_schema'')
AND NOT EXISTS (SELECT 1 FROM master..syslogins sl WHERE su.sid = sl.sid)'

INSERT INTO #Results
EXEC master..sp_executesql @Qry
END

SELECT *
FROM #Results
ORDER BY [Database Name], [Orphaned User]

IF @@ROWCOUNT = 0
PRINT 'No orphaned users exist in this server.'
END
GO

-- Execute the proc

EXEC [dbo].[usp_ShowOrphanUsers]

Script to get Long Running queries, current running queries in SQL Server


-- Run the following query to find the longest running queries using T-SQL.

SELECT DISTINCT TOP 10
    t.TEXT AS QueryName,
    s.execution_count AS ExecutionCount,
    s.max_elapsed_time AS MaxElapsedTime,
    ISNULL(s.total_elapsed_time / s.execution_count, 0) AS AvgElapsedTime,
    s.creation_time AS LogCreatedOn,
    ISNULL(s.execution_count / DATEDIFF(s, s.creation_time, GETDATE()), 0) AS FrequencyPerSec
FROM sys.dm_exec_query_stats s
CROSS APPLY sys.dm_exec_sql_text(s.sql_handle) t
ORDER BY s.max_elapsed_time DESC
GO

 

-----------------------------------------------------------------
-- Identifying the currently running queries:

SELECT r.session_id, r.status, r.start_time, r.command, s.text
FROM sys.dm_exec_requests r
CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) s
WHERE r.status = 'running'

 

 

 

 

 

 

 

---------------------------------------------------------------------------------
-- The below query will return the top 10 I/O-intensive queries that are causing my tempdb to bloat like crazy.

SELECT TOP 10
    (total_logical_reads / execution_count)  AS avg_logical_reads,
    (total_logical_writes / execution_count) AS avg_logical_writes,
    (total_physical_reads / execution_count) AS avg_physical_reads,
    execution_count, sql_handle, plan_handle
FROM sys.dm_exec_query_stats
ORDER BY (total_logical_reads + total_logical_writes) DESC

 

 

 

 

 

------------------------------------------------------------------------------------
/*** SQL Server 2005 keeps a lot of good information in the dynamic management views
about this kind of thing. Below are the 2 main queries I use to find slow running
application queries in our systems.

Queries taking longest elapsed time: ***/

SELECT TOP 100
    qs.total_elapsed_time / qs.execution_count / 1000000.0 AS average_seconds,
    qs.total_elapsed_time / 1000000.0 AS total_seconds,
    qs.execution_count,
    SUBSTRING(qt.text, qs.statement_start_offset / 2,
        (CASE WHEN qs.statement_end_offset = -1
              THEN LEN(CONVERT(NVARCHAR(MAX), qt.text)) * 2
              ELSE qs.statement_end_offset
         END - qs.statement_start_offset) / 2) AS individual_query,
    o.name AS object_name,
    DB_NAME(qt.dbid) AS database_name
FROM sys.dm_exec_query_stats qs
CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) AS qt
LEFT OUTER JOIN sys.objects o ON qt.objectid = o.object_id
WHERE qt.dbid = DB_ID()
ORDER BY average_seconds DESC;

Get Index suggestions from SQL Server Database


-- Potentially Useful Indexes
SELECT d.*,
       s.avg_total_user_cost, s.avg_user_impact, s.last_user_seek, s.unique_compiles
FROM sys.dm_db_missing_index_group_stats s,
     sys.dm_db_missing_index_groups g,
     sys.dm_db_missing_index_details d
WHERE s.group_handle = g.index_group_handle
  AND d.index_handle = g.index_handle
ORDER BY s.avg_user_impact DESC
GO

-- Suggested index columns and usage
DECLARE @handle INT

SELECT @handle = d.index_handle
FROM sys.dm_db_missing_index_group_stats s,
     sys.dm_db_missing_index_groups g,
     sys.dm_db_missing_index_details d
WHERE s.group_handle = g.index_group_handle
  AND d.index_handle = g.index_handle

SELECT *
FROM sys.dm_db_missing_index_columns(@handle)
ORDER BY column_id

Get Index recommendations from SQL Server Database


SELECT sys.objects.name,
       (avg_total_user_cost * avg_user_impact) * (user_seeks + user_scans) AS Impact,
       CAST('CREATE NONCLUSTERED INDEX ~NewNameHere~ ON ' + sys.objects.name + ' ( '
            + mid.equality_columns
            + CASE WHEN mid.inequality_columns IS NULL THEN ''
                   ELSE CASE WHEN mid.equality_columns IS NULL THEN '' ELSE ',' END
                        + mid.inequality_columns
              END
            + ' ) '
            + CASE WHEN mid.included_columns IS NULL THEN ''
                   ELSE 'INCLUDE (' + mid.included_columns + ')'
              END
            + ';' AS xml) AS CreateIndexStatement,
       mid.equality_columns, mid.inequality_columns, mid.included_columns
FROM sys.dm_db_missing_index_group_stats AS migs
INNER JOIN sys.dm_db_missing_index_groups AS mig
        ON migs.group_handle = mig.index_group_handle
INNER JOIN sys.dm_db_missing_index_details AS mid
        ON mig.index_handle = mid.index_handle
INNER JOIN sys.objects WITH (nolock)
        ON mid.object_id = sys.objects.object_id
WHERE migs.group_handle IN
      (SELECT TOP (500) group_handle
       FROM sys.dm_db_missing_index_group_stats WITH (nolock)
       ORDER BY (avg_total_user_cost * avg_user_impact) * (user_seeks + user_scans) DESC)
ORDER BY 2 DESC

 

Get performance CACHE from SQL Server


-------------------------------------------------------------------------------
/* SQL Server stores the procedure cache in 8KB data pages.
You can use the dynamic management view sys.dm_os_memory_cache_counters to provide
a summary of how the cache is allocated using this query: */

/* CACHESTORE_OBJCP - compiled plans for stored procedures, functions and triggers.
CACHESTORE_SQLCP - cached SQL statements or batches that aren't in stored procedures,
functions and triggers. This includes any dynamic SQL or raw SELECT statements sent to the server.
CACHESTORE_PHDR - algebrizer trees for views, constraints and defaults. An algebrizer tree
is the parsed SQL text that resolves the table and column names. */

SELECT TOP 6
    LEFT([name], 20) AS [name],
    LEFT([type], 20) AS [type],
    [single_pages_kb] + [multi_pages_kb] AS cache_kb,
    [entries_count]
FROM sys.dm_os_memory_cache_counters
ORDER BY single_pages_kb + multi_pages_kb DESC

----------------------------------------------------------------------------------
/* We can see individual cache entries by using the sys.dm_exec_cached_plans dynamic management view. */

SELECT usecounts, cacheobjtype, objtype, plan_handle
FROM sys.dm_exec_cached_plans

-------------------------------------------------------------------------------------
/* This query lists the most used query plans.
It includes plans for stored procedures, adhoc or dynamic SQL, triggers, prepared SQL and views. */

SELECT TOP 100
    objtype,
    p.size_in_bytes,
    LEFT([sql].[text], 100) AS [text]
FROM sys.dm_exec_cached_plans p
OUTER APPLY sys.dm_exec_sql_text(p.plan_handle) sql
ORDER BY usecounts DESC

----------------------------------------------------------------------------------------

Get High CPU Sessions & Queries from SQL Server


-- CREATE procedure HighCpuSessions_in_Production
-- as

INSERT INTO dbo.HighCpuSessions
SELECT GETDATE(),
       r.session_id,
       status,
       SUBSTRING(qt.text, r.statement_start_offset / 2,
           (CASE WHEN r.statement_end_offset = -1
                 THEN LEN(CONVERT(NVARCHAR(MAX), qt.text)) * 2
                 ELSE r.statement_end_offset
            END - r.statement_start_offset) / 2) AS query_text, -- this is the statement executing right now
       qt.dbid,
       qt.objectid,
       r.cpu_time,
       r.total_elapsed_time,
       r.reads,
       r.writes,
       r.logical_reads,
       r.scheduler_id
FROM sys.dm_exec_requests r
CROSS APPLY sys.dm_exec_sql_text(sql_handle) AS qt
WHERE r.session_id > 50
ORDER BY r.cpu_time DESC

-------------------------------------------------------------------------------------------------------------------------------
-- create procedure TOP50ExecutedQueries
-- as

SELECT TOP 50
    (total_logical_reads + total_logical_writes) AS total_logical_io,
    (total_logical_reads / execution_count)  AS avg_logical_reads,
    (total_logical_writes / execution_count) AS avg_logical_writes,
    (total_physical_reads / execution_count) AS avg_phys_reads,
    SUBSTRING(st.text, (qs.statement_start_offset / 2) + 1,
        ((CASE qs.statement_end_offset
              WHEN -1 THEN DATALENGTH(st.text)
              ELSE qs.statement_end_offset
          END - qs.statement_start_offset) / 2) + 1) AS statement_text,
    *
FROM sys.dm_exec_query_stats AS qs
CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) AS st
ORDER BY total_logical_io DESC

Find all Databases size in SQL Server


CREATE PROCEDURE PROD_DB_SIZE
AS
BEGIN
    SET NOCOUNT ON

    DECLARE @name sysname
    DECLARE @SQL nvarchar(600)

    /* Use temporary table to sum up database size w/o using group by */
    CREATE TABLE #databases
    (
        DATABASE_NAME sysname NOT NULL,
        size          int     NOT NULL
    )

    DECLARE c1 CURSOR FOR
        SELECT name
        FROM master.dbo.sysdatabases
        WHERE has_dbaccess(name) = 1 -- Only look at databases to which we have access
          AND name NOT IN ('master', 'model', 'msdb', 'Northwind', 'pubs', 'tempdb')

    OPEN c1
    FETCH c1 INTO @name
    WHILE @@fetch_status >= 0
    BEGIN
        SELECT @SQL = 'insert into #databases
            select N''' + @name + ''', sum(size) from '
            + QuoteName(@name) + '.dbo.sysfiles'

        /* Insert row for each database */
        EXECUTE (@SQL)

        FETCH c1 INTO @name
    END
    CLOSE c1
    DEALLOCATE c1

    SELECT [DATABASE_NAME],
           [DATABASE_SIZE in MB] = size * 8 / 1024 /* Convert from 8 KB pages to MB */
    FROM #databases
    -- order by DATABASE_SIZE
    ORDER BY 2 DESC
END

-- EXEC PROD_DB_SIZE

DB Log file location, file size growth and percent


-- This script pulls all data and log files
-- associated with any user db's
BEGIN
CREATE TABLE #FILEINFO
(DatabaseName VARCHAR(100),
PhysicalFileName NVARCHAR(520),
FileSizeMB INT,
Growth VARCHAR(100))
DECLARE @command VARCHAR(5000)
SELECT @command = 'Use [' + '?' + '] select ' + '''' + '?' + '''' + ' AS DatabaseName,
sysfiles.filename AS PhysicalFileName,
CAST(sysfiles.size/128.0 AS int) AS FileSize,
CASE
 WHEN status & 0x100000 = 0
     THEN convert(varchar,ceiling((growth * 8192.0)/(1024.0*1024.0))) + '' MB''
  ELSE STR(growth) + '' %''
END growth
FROM dbo.sysfiles'

INSERT #FILEINFO EXEC sp_MSForEachDB @command

SELECT * FROM #FILEINFO

order by DatabaseName, PhysicalFileName

DROP TABLE #FILEINFO

END

GO

Detect highest CPU consuming queries


CREATE PROCEDURE SQL_HighCPU
AS
SET NOCOUNT ON
------------------------------------------------
-- Description: This procedure helps you detect the highest CPU-consuming queries
-- Compatibility: SQL Server 2005 & 2008
---------------------------------------------------
SELECT TOP 50
    (a.total_worker_time / a.execution_count) AS [Avg_CPU_Time],
    CONVERT(VARCHAR, last_execution_time) AS 'Last_Execution_Time',
    total_physical_reads,
    SUBSTRING(b.text, a.statement_start_offset / 2,
        (CASE WHEN a.statement_end_offset = -1
              THEN LEN(CONVERT(NVARCHAR(MAX), b.text)) * 2
              ELSE a.statement_end_offset
         END - a.statement_start_offset) / 2) AS Query_Text,
    dbname = UPPER(DB_NAME(b.dbid)),
    b.objectid AS 'Object_ID'
FROM sys.dm_exec_query_stats a
CROSS APPLY sys.dm_exec_sql_text(a.sql_handle) AS b
ORDER BY [Avg_CPU_Time] DESC

-- EXEC SQL_HighCPU

Linked Server


exec sp_dropserver @server = 'WMSREPLICADB', @droplogins = 'droplogins'
go

EXEC sp_addlinkedserver
    @server = 'WMSREPLICADB',
    @srvproduct = '',
    @provider = 'SQLOLEDB',
    @datasrc = '100.64.0.152'
GO

sp_addlinkedsrvlogin
    @rmtsrvname = 'WMSREPLICADB'
    ,@useself = 'FALSE'
    ,@locallogin = 'SQLDBA'
    ,@rmtuser = 'portaluser'
    ,@rmtpassword = 'portaluser'
GO

select top 1 * from [WMSREPLICADB].[geo].[geo1].[sku]
GO

EXEC master.dbo.sp_addlinkedsrvlogin @rmtsrvname = N'WMSREPLICADB', @locallogin = NULL, @useself = N'False', @rmtuser = N'portaluser', @rmtpassword = N'portaluser'

Who owns SQL Server Agent JOB ?


/** Verify / Determine / Identify the owner of SQL jobs **/
USE MSDB
GO
SELECT j.[name] AS 'JobName',
       Enabled = CASE WHEN j.Enabled = 0 THEN 'No' ELSE 'Yes' END,
       l.[name] AS 'OwnerName'
FROM MSDB.dbo.sysjobs j
INNER JOIN Master.dbo.syslogins l ON j.owner_sid = l.sid
ORDER BY j.[name]
GO

---------------------------------------------------------------------------------------------------

/** To change ownership of a SQL Server Agent job,
change @job_name and @owner_login_name **/
EXEC SP_UPDATE_JOB @job_name = 'DB Weekly MaintenancePlan.Subplan_1',
     @owner_login_name = 'sqldbaadmin'

Determine SQL Server JOB, activity, status, run details


 

SET NOCOUNT ON

-- Checking for SQL Server version
IF CONVERT(tinyint, (SUBSTRING(CONVERT(CHAR(1), SERVERPROPERTY('productversion')), 1, 1))) <> 8
BEGIN
    -- This is for SQL 2k5 and SQL2k8 servers
    SET NOCOUNT ON
    SELECT CONVERT(varchar(20), SERVERPROPERTY('ServerName')) AS ServerName,
           j.name AS job_name,
           CASE j.enabled WHEN 1 THEN 'Enabled' ELSE 'Disabled' END AS job_status,
           CASE jh.run_status
               WHEN 0 THEN 'Error Failed'
               WHEN 1 THEN 'Succeeded'
               WHEN 2 THEN 'Retry'
               WHEN 3 THEN 'Cancelled'
               WHEN 4 THEN 'In Progress'
               ELSE 'Status Unknown'
           END AS 'last_run_status',
           ja.run_requested_date AS last_run_date,
           CONVERT(VARCHAR(10), CONVERT(DATETIME, RTRIM(19000101))
               + (jh.run_duration * 9 + jh.run_duration % 10000 * 6 + jh.run_duration % 100 * 10) / 216e4, 108) AS run_duration,
           ja.next_scheduled_run_date,
           CONVERT(VARCHAR(500), jh.message) AS step_description
    FROM (msdb.dbo.sysjobactivity ja
          LEFT JOIN msdb.dbo.sysjobhistory jh ON ja.job_history_id = jh.instance_id)
    JOIN msdb.dbo.sysjobs_view j ON ja.job_id = j.job_id
    WHERE ja.session_id = (SELECT MAX(session_id) FROM msdb.dbo.sysjobactivity)
    ORDER BY job_name, job_status
END
ELSE
BEGIN
    -- This is for SQL2k servers
    SET NOCOUNT ON
    DECLARE @SQL VARCHAR(5000)

    -- Getting information from sp_help_job into a temp table
    SET @SQL =
    'SELECT job_id, name AS job_name,
        CASE enabled WHEN 1 THEN ''Enabled'' ELSE ''Disabled'' END AS job_status,
        CASE last_run_outcome
            WHEN 0 THEN ''Error Failed''
            WHEN 1 THEN ''Succeeded''
            WHEN 2 THEN ''Retry''
            WHEN 3 THEN ''Cancelled''
            WHEN 4 THEN ''In Progress''
            ELSE ''Status Unknown''
        END AS last_run_status,
        CASE RTRIM(last_run_date) WHEN 0 THEN 19000101 ELSE last_run_date END last_run_date,
        CASE RTRIM(last_run_time) WHEN 0 THEN 235959 ELSE last_run_time END last_run_time,
        CASE RTRIM(next_run_date) WHEN 0 THEN 19000101 ELSE next_run_date END next_run_date,
        CASE RTRIM(next_run_time) WHEN 0 THEN 235959 ELSE next_run_time END next_run_time,
        last_run_date AS lrd, last_run_time AS lrt
     INTO ##jobdetails
     FROM OPENROWSET(''sqloledb'', ''server=(local);trusted_connection=yes'', ''set fmtonly off exec msdb.dbo.sp_help_job'')'

    EXEC (@SQL)

    -- Merging run date & time format, adding run duration and adding step description
    SELECT CONVERT(varchar(20), SERVERPROPERTY('ServerName')) AS ServerName,
           jd.job_name, jd.job_status, jd.last_run_status,
           CONVERT(DATETIME, RTRIM(jd.last_run_date))
               + (jd.last_run_time * 9 + jd.last_run_time % 10000 * 6 + jd.last_run_time % 100 * 10) / 216e4 AS last_run_date,
           CONVERT(VARCHAR(10), CONVERT(DATETIME, RTRIM(19000101))
               + (jh.run_duration * 9 + jh.run_duration % 10000 * 6 + jh.run_duration % 100 * 10) / 216e4, 108) AS run_duration,
           CONVERT(DATETIME, RTRIM(jd.next_run_date))
               + (jd.next_run_time * 9 + jd.next_run_time % 10000 * 6 + jd.next_run_time % 100 * 10) / 216e4 AS next_scheduled_run_date,
           CONVERT(VARCHAR(500), jh.message) AS step_description
    FROM (##jobdetails jd
          LEFT JOIN msdb.dbo.sysjobhistory jh
              ON jd.job_id = jh.job_id AND jd.lrd = jh.run_date AND jd.lrt = jh.run_time)
    WHERE step_id = 0 OR step_id IS NULL
    ORDER BY jd.job_name, jd.job_status

    -- dropping the temp table
    DROP TABLE ##jobdetails
END

 

Rebuild all indexes on all user tables in a database


DECLARE @Database VARCHAR(255)
DECLARE @Table VARCHAR(255)
DECLARE @cmd NVARCHAR(500)
DECLARE @fillfactor INT

SET @fillfactor = 90

DECLARE DatabaseCursor CURSOR FOR
SELECT name FROM MASTER.dbo.sysdatabases
WHERE name NOT IN ('master','msdb','tempdb','model','distribution')
ORDER BY 1

OPEN DatabaseCursor

FETCH NEXT FROM DatabaseCursor INTO @Database
WHILE @@FETCH_STATUS = 0
BEGIN

SET @cmd = 'DECLARE TableCursor CURSOR FOR SELECT ''['' + table_catalog + ''].['' + table_schema + ''].['' +
table_name + '']'' as tableName FROM ' + @Database + '.INFORMATION_SCHEMA.TABLES
WHERE table_type = ''BASE TABLE'''

-- create table cursor
EXEC (@cmd)
OPEN TableCursor

FETCH NEXT FROM TableCursor INTO @Table
WHILE @@FETCH_STATUS = 0
BEGIN

IF (@@MICROSOFTVERSION / POWER(2, 24) >= 9)
BEGIN
-- SQL 2005 or higher command
SET @cmd = 'ALTER INDEX ALL ON ' + @Table + ' REBUILD WITH (FILLFACTOR = ' + CONVERT(VARCHAR(3), @fillfactor) + ')'
EXEC (@cmd)
END
ELSE
BEGIN
-- SQL 2000 command
DBCC DBREINDEX(@Table, ' ', @fillfactor)
END

FETCH NEXT FROM TableCursor INTO @Table
END

CLOSE TableCursor
DEALLOCATE TableCursor

FETCH NEXT FROM DatabaseCursor INTO @Database
END
CLOSE DatabaseCursor
DEALLOCATE DatabaseCursor

BizTalk Server 2009 – Architecture


Recently I needed to put forward a proposal for a BizTalk 2009 implementation, and as part of this I needed to describe some of the basic architecture options available for consideration.

While I already had an idea of the type of environment that I would be looking to recommend, I felt that presenting a range of options while trying to explain some of the strengths and weaknesses of those options was a good place to start. These outline architecture options should be equally valid for any version of BizTalk Server from 2004, through 2006 and R2, up to 2009.

The following options are a crude representation of the common implementation choices to consider when designing a BizTalk environment.

Each of these options provides differing levels of resilience in the case of failure or disaster, with the latter options also providing more scope for performance tuning and scalability.

Some of the options presented above make use of clustering. Clustering may best be described as a technology that automatically allows one physical server to take over the tasks and responsibilities of another physical server that has failed. Given that all computer hardware and software will eventually fail, the goal of clustering is to ensure that mission-critical applications will have little or no downtime when such a failure occurs. Clustering can also be configured to provide load balancing, which should generally lead to performance gains and increased capacity and throughput.

(A) Single Servers

This option is the most basic BizTalk implementation that should be considered. It involves the deployment of a single BizTalk server in conjunction with a single SQL server. This configuration does not provide for any resilience in the case of the failure of either server. It is however the cheapest and easiest to implement option of those available.

Using a single BizTalk server does not provide for the level of performance tuning that is otherwise available when using more than one BizTalk server in a cluster.

The common edition of BizTalk used in single server implementations is the standard edition. It should be noted however that if future demand requires increased capacity for a solution, this BizTalk edition is limited to scaling up the implementation and not scaling out the number of servers in use. Any need to scale out the solution would require an upgrade to the enterprise edition of BizTalk.

(B) Single BizTalk Server with Clustered SQL Servers

This option uses a single BizTalk server with a cluster of SQL servers. By utilising clustered SQL servers we can ensure that there is some resilience to the implementation in respect of the databases that BizTalk relies on to operate. The clustering of two SQL servers is possible with the standard edition but to go beyond this would require the enterprise level edition. While this option offers improved resilience over option (A) it does still present a potential single point of failure at the BizTalk server.

Using a single BizTalk server does not provide for the level of performance tuning that is otherwise available when using more than one BizTalk server in a cluster.

The common edition of BizTalk used in single server implementations is the standard edition. It should be noted however that if future demand requires increased capacity for a solution, this BizTalk edition is limited to scaling up the implementation and not scaling out the number of servers in use. You are also unable to take advantage of multiple message boxes, which would allow us to balance the SQL load in the event of any bottlenecks in this area of the implementation. Any need to scale out the solution would require an upgrade to the enterprise edition of BizTalk.

(C) Clustered BizTalk Servers with Clustered SQL Servers

This option makes use of a cluster of BizTalk servers with a cluster of SQL servers to offer high availability and resilience in the case of failure of either of the server types involved. Clustering of BizTalk is only available with the enterprise edition of the product. Clustering of two SQL servers is possible with the standard edition but to go beyond this would require the enterprise level edition. 

The use of a BizTalk cluster also provides for the ability to balance load across the servers and gives more scope for performance tuning any implemented solutions. It is also possible to add more BizTalk servers to an existing cluster, giving scope for scaling out the solution as future demand requires.

This might be seen as the middle cost option, providing a good level of protection in the case of failure, a decent level of future proofing, but at a higher cost than the single BizTalk server implementations.

(D) Clustered BizTalk Servers with Clustered SQL Servers – with disaster recovery/service continuity

This option is similar to that offered by (C) and makes use of a cluster of BizTalk servers with a cluster of SQL servers to offer high availability and resilience in case of failure of either of the server types involved. Clustering of BizTalk is only available with the enterprise edition of the product. Clustering of two SQL servers is possible with the standard edition but to go beyond this would require the enterprise level edition. 

As with (C) the use of a BizTalk cluster also provides for the ability to balance load across the servers and gives more scope for performance tuning the implemented solution. It is also possible to add more BizTalk servers to an existing cluster, giving scope for scaling the solution out as future demand requires.

In this scenario however, we would be including some form of disaster recovery or service continuity. An example of this would be making use of multiple sites, with the BizTalk server cluster operating across sites to offer resilience in case of the loss of one or more sites. In this scenario there are options available for the SQL implementation depending on the network implementation; making use of either one cluster per site or a single SQL cluster across the network. A multi-site SQL implementation would require some form of data replication across the sites involved.

This is obviously an expensive and complex option, but does provide an extraordinary amount of protection in the case of failure.

What is BizTalk Server?

BizTalk is a messaging based integration tool. It consists of several tools like Business Processes (Orchestrations), Business Activity Monitoring (BAM), HAT (Health and Activity Tracking), Rules Engines, BizTalk Server Administrative Console etc.

BizTalk Server 2006 R2 builds upon the Business Process Management and SOA/ESB capabilities and addresses core challenges found in key vertical industries such as manufacturing and retail. Native capabilities of BTS R2 include support for Electronic Data Interchange (EDI) and AS2, Windows Workflow Foundation, WCF, as well as RFID.

A BizTalk Server 2006 R2 application can be created using Visual Studio 2005, provided the BizTalk Server SDK is installed on the system. A standard BizTalk Server application consists of Schemas, Mappings, and Orchestrations. The heart of a BizTalk Server application is the schema, which is used to define the message format of the source and destination data.

BizTalk Server has the capability to talk to any kind of legacy system, as it only understands plain text data (in the form of XML); in order to talk to different systems it has several inbuilt adapters like the SQL Adapter, MSMQ Adapter, SMTP Adapter, File Adapter, SOAP Adapter, etc.

What is MessageType in BizTalk Server?

BizTalk sets a message context property called BTS.MessageType. This is a concatenation of the document namespace and the root node name; in other words, MessageType is a BizTalk system property, promoted inside a pipeline, that is made up of the document namespace and the root node name.

 

What is the default mapping for Auto Mapping?

The default mapping for Auto Mapping is mapping by structure. This can be changed to mapping by node name in the map properties.

 

What is BizTalk Orchestration Dehydration and Rehydration?

 

Dehydration:
When an orchestration has been idle for a while, the orchestration engine will save the state information of the instance and free up memory resources.

Rehydration:
When a message is received, or else when a timeout has expired, the orchestration engine can be automatically triggered to rehydrate the instance; at this point the orchestration engine loads the saved instance of the orchestration into memory, restores the state, and runs it from the point where it left off.

 

List out some Artifacts of BizTalk Server?

 

1. HAT
2. BAM
3. Adapter
4. Pipeline
5. Orchestrations
6. Role Links
7. Send Ports and Send Port Groups
8. Receive Ports
9. Receive Locations
10. Policies
11. Schemas
12. Maps
13. Resources
14. BAS Artifacts (Business Activity Services)

What is the difference between a Document Schema and a Property Schema?

 

A document schema is used to define a message. It is a definition of an XML message, with optional extensions for flat files, EDI files, etc. that enable the parsers to convert the native format into XML.

A property schema is used to define message context properties. These can be of type MessageDataPropertyBase (the property value is promoted or demoted from/to the message itself) or MessageContextPropertyBase (the property value only exists within the message context and can be set by adapters, pipelines or within orchestrations).

If you wish to promote a field from a message into the message context, then you need to define a document schema and a property schema. In the document schema you promote the required field using the property schema to define the property type that will be used in the message context.

What is Correlation in context of BizTalk Server?

 

An orchestration can have more than one instance running simultaneously. Even though those instances all perform the same actions, each does so on different data contained within a message.

Correlation is the process of associating an incoming message with the appropriate instance of an orchestration.

For example: if your orchestration issues a purchase order, receives an invoice, and sends a payment, the developer must make certain that the invoice message is received by the orchestration instance which corresponds to the orchestration that sent the purchase order. Without correlation, it would be possible to send out an invoice for thousands of items even though the purchase order is for one. Each correlation is based on a set of properties known as correlation types.

 

 

 

What is the difference between distinguished field and Promoted Property?

 

Promoted properties can be accessed in orchestrations, ports, pipelines, and schemas. They use server resources.

Distinguished fields are lightweight and can be accessed only in orchestrations.

 

 

*** What is the latest version of BizTalk Server?

 

The latest version of BizTalk is BizTalk Server 2009, and the IDE to develop applications is Visual Studio 2008 SP1.

Top New Features
BizTalk Server 2009 introduces enhancements and new features in four main areas:

Updated Platform Support
Support for Visual Studio 2008 SP1
Support for .NET Framework 3.5 SP1
Support for Windows Server 2008
Virtualization support with Hyper-V
Support for SQL Server 2008

Developer & Team Productivity
New Application Lifecycle Management (ALM) experience – with Support for TFS – integrated source control, bug tracking, unit testing, MSBuild automation

Enhanced Developer Productivity with Visual Studio project system updates & Map Debugging
SOA and Web Services
ESB Guidance 2.0
New UDDI v3 Web Services Registry
New and enhanced LOB adapters (Oracle eBusiness Suite, SQL Server)
Enhanced host systems integration (WebSphere MQ, CICS, IMS)
Enhanced BAM support

Business to Business Integration
New Mobile RFID Platform
New RFID device management and industry standards support
Enhanced Support for EDI and AS2 Protocols and Drummond certification
Updated SWIFT Support and SWIFTReady Financial EAI Gold certification

Please refer to http://www.microsoft.com/biztalk/en/us/whats-new.aspx

 

 

What is the difference between a “Message Assignment” shape and an “Expression” shape?

 

A "Message Assignment" shape is used to create a new message and assign values to it.

An "Expression" shape is used to assign values to variables and also to write 'if' conditions.

 

Shrink all databases in SQL Server 2005


/******* Shrink all databases in SQL Server 2005 using the below script *******/

DECLARE @name varchar(500)
DECLARE @sql varchar(8000)
SET @sql = ''

DECLARE Database_Cursor CURSOR READ_ONLY FOR
SELECT Name FROM sysdatabases WHERE DBID > 4

OPEN Database_Cursor
FETCH NEXT FROM Database_Cursor INTO @name
WHILE @@FETCH_STATUS = 0
BEGIN
    SET @sql = @sql + 'backup log [' + @name + '] with truncate_only' + CHAR(10)
    SET @sql = @sql + 'dbcc SHRINKDATABASE ( [' + @name + '] )' + CHAR(10)
    FETCH NEXT FROM Database_Cursor INTO @name
END
CLOSE Database_Cursor
DEALLOCATE Database_Cursor

PRINT @sql
EXEC (@sql)

Get database name and file location in SQL Server 2005


 

SELECT sd.[name] AS [Database Name],
       smf.[physical_name] AS [MDF Filename]
FROM sys.master_files smf
JOIN sys.databases sd ON sd.[database_Id] = smf.[database_Id]
WHERE smf.[physical_name] LIKE '%send%'

---------------------------------------------------------------------------------------------------------------------------------
/* Get database name and file location in SQL Server 2005 with size */

DECLARE @Files TABLE
(
    Name SYSNAME,
    FileName VARCHAR(256),
    Size BIGINT
)
INSERT @Files
EXEC sp_msforeachdb 'SELECT name, filename, size FROM ?.sys.sysfiles'

SELECT *
FROM @Files

Backup all databases in SQL Server 2005 using SQL JOB


SQL script used to back up all our databases. We use it with a SQL job:

———————————————————————————————

DECLARE @sql VARCHAR(MAX)
SET @sql = ''
SELECT @sql = @sql + '
BACKUP DATABASE [' + name + '] TO DISK = ''\\CLOUD\Root\Databases\' + name + '_' + convert(varchar(8),getdate(),112) + '.bak'''
FROM sys.databases
WHERE name not in ('master', 'tempdb', 'model', 'msdb', 'pubs', 'Northwind', 'ReportServer$SQL2005', 'ReportServer$SQL2005TempDB')
AND user_access_desc = 'MULTI_USER'
AND is_read_only = 0
AND state_desc = 'ONLINE'

--PRINT @sql
EXEC(@sql)

———————————————————————————————

Shrink or truncate Mirrored Database Log File in SQL Server 2005


Here is a script to truncate the transaction logs of all your databases. We run it using a SQL Server Agent job. Works like a charm. We use it to truncate transaction logs when databases are running with SQL mirroring.

——————————————————————————————————————–

DECLARE @sql VARCHAR(MAX)
SET @sql = ''
SELECT @sql = @sql + '
USE [' + name + ']
BACKUP LOG [' + name + '] TO DISK = ''\\CLOUD\Root\Databases\' + name + '_' + convert(varchar(8),getdate(),112) + '_log.bak''
DBCC SHRINKFILE (''' + name + '_log'', 1, TRUNCATEONLY)'
FROM sys.databases
WHERE name not in ('master', 'tempdb', 'model', 'msdb', 'pubs', 'Northwind', 'ReportServer$SQL2005', 'ReportServer$SQL2005TempDB')
AND user_access_desc = 'MULTI_USER'
AND is_read_only = 0
AND state_desc = 'ONLINE'

--PRINT @sql
EXEC(@sql)

———————————————————————————————————-

TempDB Issues – Shrinking TempDB in SQL Server 2005


From time to time you find yourself needing to shrink some space out of TempDB. Shrinking database files is never my first choice, but sometimes it is the best option I have. Many people think that you cannot shrink TempDB in SQL Server 2005, but you can.

Why would I need to shrink TempDB? 

Yesterday afternoon my pager started going crazy because an Ad-Hoc query that needed some tuning filled TempDB on a server. Luckily, the user only impacted their own query so it was easy to quickly identify them and work with the right people to get the query rewritten. 

Once the immediate problem was resolved there had to be some cleanup. On this server, TempDB has 32 files (1 per processor) all on the same disk. The full database condition caused all kinds of alerts in our monitoring tools, from drive space alerts to too few growths remaining. There were 3 possible solutions to quiet the alerts: 

1.       Reboot – There is never a good time to reboot a production server 

2.       Turn off the Alerts – Not really an option. My preference would be for increasing the sensitivity 

3.       Shrink TempDB – Not a great option, but the best of the 3 

Shrinking TempDB 

Once we had decided that we would go ahead and shrink the files in TempDB it seemed like the hard part was done, but after running the following command: 

USE [tempdb] 

GO 

DBCC SHRINKFILE (N'tempdev', 5000)

GO 

Result :

DBCC SHRINKFILE: Page 1:878039 could not be moved because it is a work file page. 

DbId   FileId      CurrentSize MinimumSize UsedPages   EstimatedPages 

—— ———– ———– ———– ———– ————– 

2      1           878040      640000      4672        4672 

(1 row(s) affected) 

DBCC execution completed. If DBCC printed error messages, contact your system administrator. 

“Page could not be moved because it is a work file page.”. This is a new thing in SQL 2005 caused by the caching that is done in TempDB. I am not going to try to explain here how objects are cached in TempDB, but Kalen Delaney’s Inside Sql Server Series is a great place to learn about it if you are interested (http://www.insidesqlserver.com/books.html).  What is important is that the cached objects are tied to a query plan and that by freeing the procedure cache you can make those objects go away, allowing you to shrink your files. 

Trying again: 

DBCC FREEPROCCACHE 

GO 

USE [tempdb] 

GO 

DBCC SHRINKFILE (N'tempdev', 5000)

GO 

This time it worked: 

DbId   FileId      CurrentSize MinimumSize UsedPages   EstimatedPages 

—— ———– ———– ———– ———– ————– 

2      1           640000      640000      264         264 

(1 row(s) affected) 

DBCC execution completed. If DBCC printed error messages, contact your system administrator.

Get all tables with sizes in SQL Database


Create Table #temp (
    table_name sysname,
    row_count int,
    reserved_size varchar(50),
    data_size varchar(50),
    index_size varchar(50),
    unused_size varchar(50))

SET NOCOUNT ON
insert #temp exec sp_msforeachtable 'sp_spaceused ''?'''

select a.table_name,
    a.row_count,
    count(*) as col_count,
    a.data_size
from #temp a
    inner join information_schema.columns b on a.table_name = b.table_name
group by a.table_name, a.row_count, a.data_size
order by CAST(Replace(a.data_size, ' KB', '') as integer) desc

drop table #temp

Monitoring Transaction LOG Space on SQL Server


This stored procedure monitors transaction log space for all databases on a SQL Server instance and reports those above a usage threshold.

IF OBJECT_ID('dbo.logspace_monitor', 'P') IS NOT NULL
    DROP PROCEDURE dbo.logspace_monitor
GO

CREATE PROCEDURE logspace_monitor
    @threshold int = 70
AS
SET NOCOUNT ON

CREATE TABLE #logSpaceStats
(
    databaseName sysname,
    logSize decimal(18,5),
    logUsed decimal(18,5),
    status int
)

declare @cmd nvarchar(max)
set @cmd = 'dbcc sqlperf(logspace) with no_infomsgs'
insert into #logSpaceStats exec sp_executesql @cmd

PRINT '*********************************************'
PRINT 'DATABASES with log space used greater than the threshold'
PRINT '*********************************************'
PRINT ''

select
    cast(databaseName as varchar(25)) as 'DATABASE NAME',
    cast(logUsed as varchar(20)) as 'LOG SPACE USED',
    cast(a.log_reuse_wait_desc as varchar(15)) as 'LOG REUSE WAIT'
from #logSpaceStats, sys.databases a
where databaseName = a.name and logUsed > @threshold
go
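
Usage is straightforward; a quick sketch (the threshold defaults to 70 percent):

EXEC dbo.logspace_monitor                     -- databases using more than 70% of their log
EXEC dbo.logspace_monitor @threshold = 50     -- or pass a custom threshold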

Identify huge transactions that occurred in Replication


Sometimes replication slows down when a transaction has a large number of row changes. This script helps you identify those transactions (more than 10,000 rows affected). It should be executed in the distribution database.

SELECT
      mt.entry_time, mt.xact_seqno [XACT NO],
      COUNT(mr.xact_seqno)  [# Commands]

FROM MSrepl_transactions mt WITH(NOLOCK)
JOIN MSrepl_commands mr WITH(NOLOCK)
      ON mt.xact_seqno = mr.xact_seqno
GROUP BY
      mt.entry_time, mt.xact_seqno
having  count(1)  > 10000
ORDER BY 1

Find and FIX all the ORPHAN users in a Database


Description :

When you restore a database on a different machine, users are orphaned, i.e., there is no login associated with the user (the SID differs from machine to machine). This script finds and fixes all orphaned users in a restored database.

How to use : Run the below script on the restored database to fix the Orphan Users.

 ————————————————————————————————————-

--  Script to generate the actual logins

SELECT
    'create login [' + p.name + '] '
    + case when p.type in ('U','G') then 'from windows ' else '' end
    + 'with '
    + case when p.type = 'S' then
          'password = ' + master.sys.fn_varbintohexstr(l.password_hash) + ' hashed, '
        + 'sid = ' + master.sys.fn_varbintohexstr(l.sid) + ', check_expiration = '
        + case when l.is_expiration_checked > 0 then 'ON, ' else 'OFF, ' end
        + 'check_policy = ' + case when l.is_policy_checked > 0 then 'ON, ' else 'OFF, ' end
        + case when l.credential_id > 0 then 'credential = ' + c.name + ', ' else '' end
      else '' end
    + 'default_database = ' + p.default_database_name
    + case when len(p.default_language_name) > 0 then ', default_language = ' + p.default_language_name else '' end
FROM sys.server_principals p
LEFT JOIN sys.sql_logins l ON p.principal_id = l.principal_id
LEFT JOIN sys.credentials c ON l.credential_id = c.credential_id
WHERE p.type in ('S','U','G')
AND p.name <> 'sa'

 ————————————————————————————————————-

--  Auto Fix ORPHANED USERS

DECLARE @UserName nvarchar(255)

DECLARE orphanuser_cur cursor for
SELECT UserName = name
FROM sysusers
WHERE issqluser = 1 and (sid is not null and sid <> 0x0) and suser_sname(sid) is null
ORDER BY name
-- cannot translate sid to an existing login = orphaned

OPEN orphanuser_cur
FETCH NEXT FROM orphanuser_cur INTO @UserName

WHILE (@@fetch_status = 0)
BEGIN
    PRINT @UserName + ' user name being resynced'
    EXEC sp_change_users_login 'Update_one', @UserName, @UserName
    FETCH NEXT FROM orphanuser_cur INTO @UserName
END

CLOSE orphanuser_cur
DEALLOCATE orphanuser_cur
go

 ————————————————————————————————————-

Query to Identify Missing Indexes


The following are the missing index DMVs (from SQL Server 2005 BOL):

sys.dm_db_missing_index_group_stats = Returns summary information about missing index groups, for example, the performance improvements that could be gained by implementing a specific group of missing indexes.

sys.dm_db_missing_index_groups = Returns information about a specific group of missing indexes, such as the group identifier and the identifiers of all missing indexes that are contained in that group.

sys.dm_db_missing_index_details = Returns detailed information about a missing index; for example, it returns the name and identifier of the table where the index is missing, and the columns and column types that should make up the missing index.

sys.dm_db_missing_index_columns = Returns information about the database table columns that are missing an index.

——————————————————————————————————————————————–

SELECT
statement AS [database.schema.table],
column_id , column_name, column_usage,
migs.user_seeks, migs.user_scans,
migs.last_user_seek, migs.avg_total_user_cost,
migs.avg_user_impact
FROM sys.dm_db_missing_index_details AS mid
CROSS APPLY sys.dm_db_missing_index_columns (mid.index_handle)
INNER JOIN sys.dm_db_missing_index_groups AS mig
ON mig.index_handle = mid.index_handle
INNER JOIN sys.dm_db_missing_index_group_stats  AS migs
ON mig.index_group_handle=migs.group_handle
ORDER BY mig.index_group_handle, mig.index_handle, column_id
GO

===============================================================================

/* ——————————————————————
— Title: FindMissingIndexes
— Description: This query returns indexes that SQL Server 2005
— (and higher) thinks are missing since the last restart. The
— “Impact” column is relative to the time of last restart and how
— bad SQL Server needs the index. 10 million+ is high.
— Changes: Updated to expose full table name. This makes it easier
— to identify which database needs an index. Modified the
— CreateIndexStatement to use the full table path and include the
— equality/inequality columns for easier identification.
—————————————————————— */

SELECT 
 [Impact] = (avg_total_user_cost * avg_user_impact) * (user_seeks + user_scans), 
 [Table] = [statement],
 [CreateIndexStatement] = 'CREATE NONCLUSTERED INDEX ix_'
  + sys.objects.name COLLATE DATABASE_DEFAULT
  + '_'
  + REPLACE(REPLACE(REPLACE(ISNULL(mid.equality_columns,'')+ISNULL(mid.inequality_columns,''), '[', ''), ']',''), ', ','_')
  + ' ON '
  + [statement]
  + ' ( ' + ISNULL(mid.equality_columns, '')
  + CASE WHEN mid.inequality_columns IS NULL THEN '' ELSE
   CASE WHEN mid.equality_columns IS NULL THEN '' ELSE ',' END
  + mid.inequality_columns END + ' ) '
  + CASE WHEN mid.included_columns IS NULL THEN '' ELSE 'INCLUDE (' + mid.included_columns + ')' END
  + ';',
 mid.equality_columns,
 mid.inequality_columns,
 mid.included_columns
FROM sys.dm_db_missing_index_group_stats AS migs
 INNER JOIN sys.dm_db_missing_index_groups AS mig ON migs.group_handle = mig.index_group_handle
 INNER JOIN sys.dm_db_missing_index_details AS mid ON mig.index_handle = mid.index_handle
 INNER JOIN sys.objects WITH (NOLOCK) ON mid.OBJECT_ID = sys.objects.OBJECT_ID
WHERE (migs.group_handle IN
  (SELECT TOP (500) group_handle
  FROM sys.dm_db_missing_index_group_stats WITH (NOLOCK)
  ORDER BY (avg_total_user_cost * avg_user_impact) * (user_seeks + user_scans) DESC)) 
 AND OBJECTPROPERTY(sys.objects.OBJECT_ID, 'isusertable') = 1
ORDER BY [Impact] DESC, [CreateIndexStatement] DESC

List all Indexes For SQL SERVER Database


SELECT
    so.name AS TableName,
    si.name AS IndexName,
    si.type_desc AS IndexType
FROM
    sys.indexes si
    JOIN sys.objects so ON si.[object_id] = so.[object_id]
WHERE
    so.type = 'U'    -- only get indexes for user-created tables
    AND si.name IS NOT NULL
ORDER BY
    so.name, si.type

SQL Server 2008 R2 High Availability Architecture


Published: May 2010 

Applies to: SQL Server 2005, SQL Server 2008, SQL Server 2008 R2

Summary:

This whitepaper describes five commonly-deployed architectures using SQL Server 2005 and SQL Server 2008 that are designed to meet the high-availability and disaster recovery requirements of enterprise applications. The whitepaper will describe the architectures and also present case studies that illustrate how real-life customers have deployed these architectures to meet their business requirements.

This whitepaper is targeted at architects, IT Pros, and senior database administrators tasked with architecting a high-availability and disaster-recovery strategy for their mission-critical applications. It assumes the reader has a good understanding of Windows and SQL Server technologies and has sufficient knowledge of transaction processing. These basic features and topics are not covered.

Introduction and Overview

SQL Server 2005 and SQL Server 2008 include many technologies that can be used to minimize downtime and maximize data protection, so that database administrators can ensure smooth operation and continuous access to business-critical data, and meet availability levels according to various service level agreements.

Sometimes high-availability and disaster-recovery architectures are unfortunately designed without considering the necessary business requirements—possibly there is already an incumbent technology, or the designers are familiar with a certain technology and choose it as the basis for a new architecture. This choice, when coupled with a lack of understanding of the capabilities of the various high-availability and disaster-recovery technologies, can lead to an architecture that fails to meet the business needs.

It is imperative that the high-availability and disaster-recovery requirements of the business are the drivers when evaluating which technologies are suitable as part of the architecture. The two major business needs to consider are:

  • The duration of acceptable application downtime, whether from an unplanned outage or from scheduled maintenance/upgrades (i.e. the defined Recovery Time Objective—RTO).
  • The ability to accept potential data loss from an outage (i.e. the defined Recovery Point Objective—RPO).

There is an existing whitepaper, “High-Availability with SQL Server 2008” (available at http://msdn.microsoft.com/en-us/library/ee523927.aspx), that contains information about each of the high-availability technologies in SQL Server 2008, as well as further links to other whitepapers and technical resources. It also describes how to evaluate business requirements and technical/non-technical limitations to help choose appropriate technologies.

However, there is a lack of information regarding proven architectures and real-life customer deployments, where the high-availability and disaster-recovery architecture was chosen after careful requirements analysis and technology evaluation.

This whitepaper provides a consolidated description of five proven and commonly deployed high-availability and disaster-recovery architectures, in terms of the technologies used and the business requirements they are able to meet.

Furthermore, before committing to the implementation of any technology strategy, many companies would like some level of reassurance that what they are attempting has been successfully accomplished previously. To meet this need, Microsoft regularly publishes case studies showing how their technologies have been used. This whitepaper also includes references to relevant case studies of real-life customer deployments for each of the architectures described.

Together these two whitepapers will provide the information necessary to allow the design of an appropriate and successful high-availability and disaster-recovery architecture.

Failover Clustering for High Availability with Database Mirroring for Disaster Recovery

In this architecture, failover clustering provides the local high availability and database mirroring provides the disaster recovery capability. A failover cluster on its own protects against physical server, Windows Server, and SQL Server failures but does not maintain a redundant copy of the data and so does not protect against a major outage like an I/O subsystem failure, power failure, or failure of the network link to the primary data center.

Database mirroring is one way to provide a redundant copy of a single database on a separate physical server, where the server can be in the same data center or geographically separated.

This architecture is widely adopted by customers who are familiar and comfortable with the installation, configuration, and maintenance of failover clusters.

A typical implementation of this architecture involves a failover cluster in the primary data center with database mirroring to a secondary data center or disaster-recovery site, as shown in Figure 1 below.

Figure 1: Failover clustering combined with database mirroring.

There are a number of variations and configuration options for this architecture depending on the business requirements, including the following:

  1. Each data center has a failover cluster with database mirroring between them. If the business requirements state that the workload performance should not be impacted after a failover to the secondary data center, the mirror server needs to have the same hardware configuration (and hence workload servicing capability) as the failover cluster in the primary data center. The alternative, of course, is to have a less capable stand-alone server as the mirror server—however, this is not a recommended best practice.
  2. Synchronous vs. asynchronous database mirroring. Synchronous database mirroring can allow a zero data-loss requirement to be met, potentially with some workload performance impact depending on the type of workload and the network bandwidth between the two data centers. Asynchronous database mirroring does not guarantee zero data loss in the case of a disaster, but has no impact on workload performance.
  3. Automatic failover to the secondary data center. When synchronous database mirroring is configured with a third (optional) witness server, the database mirroring system can detect a failure and perform an automatic failover to the secondary data center. If this behavior is desirable, care must be taken to configure the database mirroring partner timeout such that the local failover cluster fails over before database mirroring performs a failover to the secondary data center.
  4. Automatic client connection to the secondary data center. If explicit client redirection is used, the client specifies the FAILOVER_PARTNER in the connection string. After a database mirroring failover has occurred, the client simply has to reconnect and the connection will automatically be made to the secondary data center. Alternatively, some form of external routing can be used (some installations have used DNS routing, for instance).
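
For illustration, explicit redirection with the SQL Server client libraries is just a connection-string setting; a minimal sketch with hypothetical server and database names:

Server=SQLPRIM01;Failover Partner=SQLMIRR01;Database=AppDB;Integrated Security=True;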

Deployment Example: CareGroup Healthcare System

CareGroup manages a number of hospitals in the Boston area and has 390 databases underpinning 146 mission-critical clinical applications, totaling 2 terabytes of data.

The RPO and RTO requirements for their databases depend on the importance of the data contained within the database. CareGroup defined three tiers to classify this:

  • ‘AAA’: zero downtime and zero data loss
  • ‘AA’: up to one hour of downtime and data loss
  • ‘A’: up to 1 day of downtime and data loss

CareGroup also wanted to remove the need to hard-code the database mirroring partner server names in the application connection string to redirect client connections during a disaster recovery failover.

Using these requirements, they were able to determine that a combination of SQL Server failover clusters in two data centers with database mirroring between the data centers was the appropriate solution. For the ‘AAA’ databases, database mirroring is configured synchronously to avoid data loss, and for the lower-classed databases it is configured asynchronously. In the event of a failure, DNS routing is used to redirect traffic to the secondary data center.

The architecture that CareGroup deployed is illustrated in Figure 2 below.

Figure 2: High-availability and disaster-recovery architecture deployed by CareGroup.

The Global Site Selector (GSS) enables the various applications at CareGroup to seamlessly connect to the appropriate database mirroring principal server, without having to specify partner server names in the connection string for the client redirection. This is necessary as some of the applications that CareGroup uses are from 3rd-party vendors that do not permit (or require too much work for) the client connection string to be altered to use explicit client redirection.

Instead, the applications specify one SQL Server instance name in the connection string, of the form “Green\SQL1”. In this connection string, the server name “Green” is a DNS alias that resolves to the GSS device, which in turn translates the alias into the appropriate IP address of the current database mirroring principal server.

Using this architecture, CareGroup was able to meet their availability requirements, including performing an upgrade to SQL Server 2008 using database mirroring that only involved a few minutes of downtime.

As an aside, by upgrading to SQL Server 2008, CareGroup can also take advantage of some of the other features in the product:

  • Transparent Data Encryption to allow all data in CareGroup’s databases to be encrypted without requiring costly changes to existing applications, which satisfied their desire to increase the security of patient records.
  • Advanced Server Auditing to allow CareGroup to monitor all activity in databases across their enterprise and ensure compliance with HIPAA and other sets of regulations.
  • Policy-based Management and Performance Data Collection gives CareGroup enhanced configuration policy enforcement, and easy performance diagnostic information generation, storage, and analysis, respectively.
  • Resource Governor allows CareGroup to guarantee critical workload performance and prevent unexpected workloads from affecting application availability.
  • Reporting Services will allow CareGroup to create a single, consolidated reporting solution.

More information on this solution can be found at:

Another example of this architecture is described in the case study of the deployment by ServiceU Corporation, available at:

Database Mirroring for High Availability and Disaster Recovery

In this architecture, synchronous database mirroring can be used to maintain an up-to-date, redundant copy of a single database by continually sending transaction log records from the principal database on the principal server to the mirror database on the mirror server.

If a failure occurs, the mirror database can be brought online as the new principal database and client connections can be failed over. As long as the mirror database remains synchronized with the principal database, zero data loss results when a failover is necessary.
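
As a concrete illustration, a planned, manual failover of a synchronized mirroring session is a single statement run on the principal (the database name is hypothetical):

-- Run on the principal server while the mirror is synchronized
ALTER DATABASE AppDB SET PARTNER FAILOVER;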

There are a number of variations and configuration options for this architecture depending on the business requirements, including the following:

  1. Configuring a third server, the witness. When a witness server is included as part of a synchronous database mirroring architecture, a failover can be performed automatically when a failure is detected, providing the highest availability of the data. If database mirroring is used between two data centers, it is recommended to place the witness in a third data center, for the highest availability.
  2. Configuring asynchronous database mirroring. When the network link between the principal and mirror servers is not sufficient to synchronously send the transaction log records without leading to workload performance degradation, database mirroring can be configured to send the transaction log records asynchronously. While this removes the performance degradation, it also removes the assurance of zero data-loss if a failover is necessary. This may be perfectly acceptable depending on the desired RPO.
  3. Configuring database mirroring and log shipping. Database mirroring allows a single mirror of the principal database, so for added redundancy, one or more log shipping secondary servers can also be configured as warm-standby databases.
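
The synchronous/asynchronous choice in point 2, as well as the partner timeout mentioned earlier, is controlled with ALTER DATABASE SET PARTNER options; a minimal sketch, again with a hypothetical database name:

-- Switch an established mirroring session to asynchronous (high-performance) mode
ALTER DATABASE AppDB SET PARTNER SAFETY OFF;

-- Switch back to synchronous (high-safety) mode
ALTER DATABASE AppDB SET PARTNER SAFETY FULL;

-- Adjust the failure-detection timeout (in seconds)
ALTER DATABASE AppDB SET PARTNER TIMEOUT 30;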

This architecture is typically lower cost than one involving failover clustering, as the principal and mirror servers can be standalone servers with direct-attached storage, rather than each part of a multi-server failover cluster with SAN storage. It is most commonly used when the business requirements call for databases to be protected for disaster recovery purposes and for some businesses, when there is some technical or operational reason for not using failover clustering.

A typical implementation of this architecture involves a principal server in the primary data center with a mirror server in a secondary data center or disaster-recovery site. There is often a third server, the witness, included in the architecture as shown in Figure 3 below.

Figure 3: Database mirroring for high availability and disaster recovery.

Deployment Example: bwin Corporation

bwin is an online gaming company that provides a wide variety of games and sports betting, with up to 1 million bets per day placed on more than 90 sports. They have more than 100 terabytes of data spread over 850 databases on more than 100 instances of SQL Server, with the largest single database being more than 4 terabytes. At peak times their system can support more than 450 thousand Transact-SQL statements per second.

They wanted to be able to cope with complete loss of their primary data center, and their budget allowed them to implement a solution that meets their business requirements. They also wanted zero data loss and 99.99% availability 24×7. The solution they chose involved synchronous database mirroring over dark fiber between two data centers that are 11 kilometers apart. They also maintain two log shipping secondaries—one in each data center. The log shipping secondary in the main data center is configured with a 1-hour restore delay to allow recovery from accidental user errors (such as a delete or update).

The architecture that bwin deployed is illustrated in Figure 4 below.

Figure 4: High-availability and disaster-recovery architecture deployed by bwin.

This architecture was deployed on SQL Server 2005 and enabled bwin to meet all their business requirements around high availability and disaster recovery, while also being able to service their peak workload. Bwin plans to upgrade this architecture in the future to add a database mirroring witness server to allow automatic failovers.

After moving to SQL Server 2008, bwin is planning to take advantage of some of the new features in the product:

  • Database mirroring log stream compression will result in improved throughput.
  • Backup compression will reduce the size of some backups by over 80%. This will allow bwin to extend the life of its systems as it experiences rapid growth.
  • Enhanced Auditing to allow bwin to comply with the myriad regulations in the countries around the world in which it operates.

More information on bwin’s testing and migration to SQL Server 2008 can be found at:

Geo-Clustering for High Availability and Disaster Recovery

In this architecture, a geographically-dispersed cluster (geo-cluster) is implemented, which behaves like a regular failover cluster but the constituent servers are in geographically separate sites. The failover cluster quorum is maintained between the sites, and the data disks are synchronously or asynchronously mirrored.

If the servers fail in the main data center, the SQL Server instances are started in the secondary data center in a manner similar to when the servers are collocated and the clients reconnect in the same way as for a failover of a regular failover cluster (and vice-versa). To achieve this it is often necessary to use a very fast network link (like dark fiber) and a network configuration that abstracts the physical location of the cluster nodes from the clients.

The cluster nodes themselves are unaware that they are part of a geo-cluster, so all replication must be handled at the storage level. If the data disks are synchronously mirrored between sites, then zero data loss will occur if a failover is necessary, but this requires sufficient network bandwidth.

This architecture is deployed when seamless failover of an entire SQL Server instance is required between multiple data centers, avoiding the potential downtime of having to perform a disaster recovery operation.

A typical implementation of this architecture involves the main failover cluster nodes in the primary data center with the other failover cluster nodes in the secondary data center or disaster-recovery site, as shown in Figure 5 below.

Figure 5: Geo-Clustering for high availability and disaster recovery.

Deployment Example: QR Limited

QR Limited is Australia’s leader in rail transportation and logistics, operating more than 1,000 train services a day, including carrying more than 170,000 passengers and more than 683,000 tons of freight.

QR Limited migrated their SAP databases from a legacy mainframe onto SQL Server 2005 and wanted to provide high availability and disaster recovery capabilities for the various SAP databases and the one-terabyte ERP database, but with the ability to seamlessly protect against loss of a data center without having to perform protracted disaster recovery.

They chose to implement a geo-cluster between two data centers 5 kilometers apart, with a fiber link between them to accommodate the SAN replication network traffic and all client communications to the active cluster nodes. The data disks are synchronously mirrored from the production data center to the disaster recovery data center.

The architecture that QR Limited deployed is illustrated in Figure 6 below.

Figure 6: High-availability and disaster-recovery architecture deployed by QR Limited.

By switching from mainframe-based DB2 to SQL Server 2005, they realized the following additional benefits to their enhanced high availability and disaster recovery:

  • $100,000 per month in mainframe cost savings.
  • SAP ERP transactional response times that are 20% to 40% faster.
  • An 8-to-1 reduction in batch processing time.

More information on this solution can be found at:

Failover Clustering for High Availability Combined with SAN-Based Replication for Disaster Recovery

This architecture uses failover clustering to provide local high availability and SAN-based replication to provide disaster recovery. The database volumes on the SAN in the main data center are mirrored to another SAN in a secondary data center, which does not necessarily need to be attached to another failover cluster.

If the main data center is lost, there is no automatic failover of a SQL Server instance to the server in the secondary data center, but there is a redundant copy of the databases that can be mounted and attached to Windows and to a SQL Server instance.

This architecture is often used when a business requires that databases from different vendors, used by related but distinct applications, be logically consistent to maintain data integrity in the case of a disaster.

A typical implementation of this architecture involves a failover cluster in the primary data center with SAN-based replication of the storage used by the various SQL Server instances to a SAN in the secondary data center or disaster-recovery site, as shown in Figure 7 below.

Figure 7: Failover clustering combined with SAN-based replication.

There are a number of variations and configuration options for this architecture depending on the business requirements, including the following:

  • Synchronous vs. asynchronous replication. With synchronous replication, there is zero data loss when a failure occurs, but more network bandwidth may be required to prevent workload performance degradation. With asynchronous replication, no such assurance is available, but there is no performance degradation.
  • Server configuration in the secondary data center. Sometimes there is a standalone server in the secondary data center instead of a failover cluster. This architecture is used when requirements allow local availability to be lower after the loss of a data center, or when budgetary limitations do not allow for a failover cluster in the secondary data center.
  • Bi-directional replication. If there is an active server (or failover cluster) in each data center, SAN-based replication can be used to provide data redundancy between the data centers for the data from both failover clusters.

Deployment Example: Progressive Insurance

Progressive Insurance is one of the largest auto insurance companies in the United States with revenues of more than $14 billion. They were replacing a 30-year-old mainframe-based policy management application that served millions of customers through a network of 30,000 independent insurance agencies. When fully deployed, the total data size will be 10 terabytes and the largest table will have almost 2 billion rows.

As well as replacing the legacy application, Progressive required no more than 1 hour of data loss and a maximum allowable downtime of 24 hours.

Progressive chose to use failover clusters in two active data centers for local high availability, with asynchronous SAN replication between them to provide data redundancy in the event of a disaster.

The SQL Server 2005 architecture that Progressive deployed is illustrated in Figure 8 below.

Figure 8: High-availability and disaster-recovery architecture deployed by Progressive.

The OC-48 links provide 2.5 gigabits per second and are shared with other Windows servers and the mainframe to provide asynchronous replication between the EMC Symmetrix DMX 3 and 4 series SANs.

Progressive is also making use of the following SQL Server 2005 features to enhance availability:

  • Table partitioning to allow easier management of 1-terabyte sized tables, especially using the ‘sliding-window’ mechanism to allow fast range-deletes without long-running, blocking operations.
  • Online index operations to allow critical index maintenance to be performed without requiring scheduled downtime.
  • Dynamic Management Views to allow much easier insight into system conditions that could affect performance and data availability.

More information on this solution can be found at:

Peer-to-Peer Replication for High Availability and Disaster Recovery

This architecture uses peer-to-peer replication to provide both high availability and disaster recovery. Peer-to-peer replication uses a bi-directional transactional replication stream, with all nodes in the replication topology receiving updates from all other nodes.

Peer-to-peer replication involves some latency between a transaction committing on one node and the change being replayed on all other nodes in the replication topology, so it is not suitable for satisfying zero data-loss requirements. It also does not provide automatic detection of failures or automatic failover. It does, however, allow multiple copies of the protected data to be made, and furthermore, those copies are available for read and (with a lot of planning and care) write activity.

Peer-to-peer replication essentially makes a database both a publication and a subscription database, and so local insert, update, and delete activity is permitted in the same database and tables that are receiving updates from other nodes. For this reason, table schemas and application logic must be carefully developed to avoid conflicts (even with SQL Server 2008, which helps with automatic conflict detection and resolution).

This architecture is used when the secondary data copy is required to be available for reading or writing, and/or when multiple copies of the data must be maintained.

A typical implementation of this architecture involves a peer-to-peer node in each data center, with updates occurring and being received by all other nodes in the other data centers, as shown in Figure 9 below.

Figure 9: Peer-to-peer replication for high availability and disaster recovery.

Deployment Example: An International Travel Industry Company

This company is one of Asia’s leading and fastest growing providers of online hotel reservations, with data centers in Asia and the United States. In their previous architecture, all write activity was handled in the Asia data center, whereas only reads could be serviced from the US data center.

They wanted to remove the single point of failure—the data center in Asia—by having all data available at both data centers, and either data center able to handle write requests. They chose to implement a combination of peer-to-peer replication as well as traditional transactional replication to use the disaster-recovery hardware to process the read-only workload.

Database mirroring and log shipping were not options as both data centers had to be able to handle write requests—which neither technology permits. Failover clustering was discounted for the same reason, and also because of a desire to limit the capital expenditure on hardware.

The architecture that the travel company deployed is illustrated in Figure 10 below.

Figure 10: High-availability and disaster-recovery architecture deployed by the travel company.

The SQL Server Customer Advisory team worked closely with this customer to produce a very detailed whitepaper describing the requirements analysis, technology analysis, replication solution design, and testing strategy. It is available at http://sqlcat.com/whitepapers/archive/2009/09/23/using-replication-for-high-availability-and-disaster-recovery.aspx.

Conclusion

This whitepaper has highlighted five commonly deployed high-availability and disaster-recovery architectures using SQL Server technologies, along with examples of real-life customer deployments of these architectures.

The high-availability and disaster-recovery technologies provided in SQL Server 2005 have been further enhanced in SQL Server 2008. It is very important to select architectures after carefully considering business requirements, and then deploy the technology to meet those requirements. It can be tempting to select a new and interesting (or possibly incumbent) technology, regardless of the business requirements, but that can be counterproductive in the long run.

It can be very useful to review published reference implementations from SQL Server customers, both to see what technology choices worked for the customers’ requirements, and also to potentially learn from their experiences.

Finally, while SQL Server 2005 provides all the technologies needed to implement a successful high-availability and disaster-recovery architecture, SQL Server 2008 has many enhancements to these technologies, and includes many others that can aid with security, maintainability, and performance.

The information presented in this whitepaper, and in those to which it links, should provide a basis for anyone tasked with evaluating and choosing SQL Server 2008 technologies, with the goal of protecting and increasing the availability of critical business data.

For more information:

SQL Server 2008 Tutorials & Understanding.


                        SQL Server 2008 Tutorial

This SQL Server tutorial is for anyone who wants to learn how to use SQL Server 2008. It assumes a basic understanding of databases and how they work.

If you don’t currently have an understanding of how databases work, start with the basic database tutorial first. That tutorial covers the basic concepts of databases, so the examples in this tutorial will make more sense once you return here.

To get the most out of this tutorial, you should have a copy of SQL Server installed on your computer, so that you can go through the examples yourself. The examples in this tutorial are all done using the free version of SQL Server – SQL Server 2008 Express. You can download SQL Server 2008 Express from the Microsoft website (we’ll get to that in a minute).

If you don’t have SQL Server yet, that’s OK. The tutorial is straightforward and contains plenty of screenshots.

About SQL Server

Microsoft SQL Server is a Relational Database Management System (RDBMS) designed to run on platforms ranging from laptops to large multiprocessor servers. SQL Server is commonly used as the backend system for websites and corporate CRMs and can support thousands of concurrent users.

SQL Server comes with a number of tools to help you with your database administration and programming tasks.

SQL Server is much more robust and scalable than a desktop database management system such as Microsoft Access. Anyone who has ever tried using Access as a backend to a website will probably be familiar with the errors that were generated when too many users tried to access the database!

Although SQL Server can also be run as a desktop database system, it is most commonly used as a server database system.

Server Database Systems

Server based database systems are designed to run on a central server, so that multiple users can access the same data simultaneously. The users normally access the database through an application.

For example, a website could store all its content in a database. Whenever a visitor views an article, they are retrieving data from the database. As you know, websites aren’t normally limited to just one user. So, at any given moment, a website could be serving up hundreds, or even thousands of articles to its website visitors. At the same time, other users could be updating their personal profile in the members’ area, or subscribing to a newsletter, or anything else that website users do.

Generally, it’s the application that provides the functionality to these visitors. It is the database that stores the data and makes it available. Having said that, SQL Server does include some useful features that can assist the application in providing its functionality.

SQL Server Editions

SQL Server comes in many editions. Some are paid versions; others are free. The examples in this tutorial were done using the free Express version of SQL Server.

SQL Server 2008 Editions

If you are serious about installing (or upgrading) SQL Server, this page provides a quick overview of your options.

SQL Server 2008 comes in many different editions. The edition you choose will depend on your requirements. If you are looking for a free database management system, you will need to choose one of the Express editions or the Compact edition. You could also try the Evaluation edition, which allows you to trial SQL Server 2008 for 180 days.

The Editions

Here are the different editions available for SQL Server 2008.

  • Enterprise Edition
    Data management and business intelligence platform providing enterprise class scalability, high availability, and security for running business-critical applications
  • Standard Edition
    Data management and business intelligence platform providing ease of use and manageability for running departmental applications
  • Workgroup Edition
    Data management and reporting platform providing secure, remote synchronization, and management capabilities for running branch applications
  • Developer Edition
    May be installed and used by one user to design, develop, test, and demonstrate your programs on as many systems as needed
  • Web Edition
    A low-TCO, scalable, and manageable database option for web hosters and end customers looking to deploy publicly facing web applications and services
  • Express Edition
    A free edition of SQL Server ideal for learning and building desktop and small server applications and for redistribution by ISVs
  • Compact Edition
    A free, SQL Server embedded database ideal for building stand-alone and occasionally connected applications for mobile devices, desktops, and web clients
  • Evaluation Edition
    This edition may be installed for demonstration and evaluation purposes and expires after 180 days.

SQL Server Edition in this Tutorial

 SQL Server 2008 Express with Advanced Services includes the following features:

  • SQL Server database engine – create, store, update and retrieve your data
  • SQL Server Management Studio Basic – visual database management tool for creating, editing and managing databases
  • Full-text Search – powerful, high-speed engine for searching text-intensive data
  • Reporting Services – integrated report creation and design environment to create reports

The Express edition also comes as SQL Server 2008 Express with Tools, which only includes the SQL Server database engine and the SQL Server Management Studio Basic, and SQL Server 2008 Express (Runtime Only) which doesn’t include the SQL Server Management Studio.

While the free version of SQL Server does have its limitations, it is a good start for those starting out with SQL Server.

SQL Server Management Studio (SSMS)

SQL Server Management Studio (SSMS) is the main administration console for SQL Server.

SSMS enables you to create database objects (such as databases, tables, and views), view the data within your database, configure user accounts, transfer data between databases, and more.

Here’s what SQL Server Management Studio looks like when you first open it up:

The left pane contains the Object Explorer. The Object Explorer provides navigation to databases, server objects (such as triggers), log files, and more.

The right pane allows you to write queries against the database and view the results. In this screenshot I have opened a blank query by clicking the “New Query” button. You can also bring up other windows, such as the Properties window.

Note that I have minimized the size of the window for this screenshot. Once maximized, you have much more room to play with.

You can use SQL Server Management Studio to create as many databases as you like. You can also connect to as many databases on as many servers as you like.

Most of the tasks performed with SQL Server Management Studio are initiated either from the top menu, or by right-clicking on an icon/object.

Throughout most of this tutorial, we’ll be looking at the various things you can do via SQL Server Management Studio.


 

 

SQL Server – Create a Database

 

One of the first things we should look at with SQL Server/Management Studio is how to create a database. After all, most of the tasks you perform with SQL Server will revolve around one or more databases.

System Databases

If you’ve only just installed SQL Server, you might notice that some databases have already been created. These are system databases.

Database    Type               Description
master      System database    Stores system-level information such as user accounts, configuration settings, and info on all other databases.
model       System database    Used as a template for all other databases that are created.
msdb        System database    Used by the SQL Server Agent for configuring alerts and scheduled jobs, etc.
tempdb      System database    Holds all temporary tables, temporary stored procedures, and any other temporary storage requirements generated by SQL Server.
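
You can also list the system databases with a quick query; a minimal sketch:

-- The system databases occupy database_id 1 through 4
SELECT name, database_id
FROM sys.databases
WHERE database_id <= 4;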

We will now create another database for our own use.

Creating a New Database

The following steps demonstrate how to create a database in SQL Server using SQL Server Management Studio.

  1. Right click on the “Databases” icon and select “New Database…”:
  2. Name your database and click “OK”:

Your New Database

You will now notice your new database appears under the “Databases” section of SQL Server Management Studio.

Your new database is based on the “Model” database. The Model database is a system database which is used as a template whenever a new database is created. If you use the left pane to navigate to your database and expand the tree, you will notice that your database already contains a number of objects. For example, it already contains system functions, system views, system stored procedures, and (hidden) system tables. These are system objects which provide information about the database.

Other Options

We have just created a database using the default options. When we created the database, a “Data File” and a “Transaction Log” were created. They were created in the default location for our server.

If we’d wanted to, we could have specified a different location for these files. We also could have changed specifications on whether to allow the file to grow automatically (as it stores more and more data), and if so, how that growth should be managed. We could have done that at step 2. But all is not lost. We can still do it now that we’ve created the database. We can do it via the Properties dialog box.

To view or change the database properties, simply right click on the database and select “Properties”:

The Properties dialog contains a large number of options for changing the configuration of your database. For now, we can leave everything at its default setting.
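
If you prefer scripts, the same database, including the file locations and growth settings discussed above, can be created in T-SQL. A minimal sketch, with illustrative names, paths, and sizes:

CREATE DATABASE TestDatabase
ON PRIMARY
(
    NAME = TestDatabase_Data,
    FILENAME = 'C:\SQLData\TestDatabase.mdf',     -- illustrative data file path
    SIZE = 100MB,
    FILEGROWTH = 10%                              -- grow automatically by 10%
)
LOG ON
(
    NAME = TestDatabase_Log,
    FILENAME = 'C:\SQLData\TestDatabase_log.ldf', -- illustrative log file path
    SIZE = 25MB,
    FILEGROWTH = 10MB
);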

 

SQL Server – Create a Table

 

This lesson demonstrates how to create a table in a SQL Server database using SQL Server Management Studio (SSMS).

  1. Ensuring you have the right database expanded, right click on the “Tables” icon and select “New Table…”:
  2. While you have this screen open, do the following:
    1. Using the values in the screenshot, complete the details in the “Column Name” column, the “Data Type” column, “Length” column, and “Allow Nulls” column.
    2. Make the IndividualId column an “identity column” by setting “Is Identity” to “Yes” (this option is under the “Identity Specification” section in the bottom pane). Note that to set values in the bottom pane, you need to select the column name in the top pane first. This column is going to be an auto-number column – it will contain an incrementing number for each record that is created.
    3. Set the “Default Value” of the DateCreated column to (getdate()). (This will automatically insert the current date into that field for each new record).

 

What we are doing at this stage, is creating the column names, specifying the type of data that can be entered into them, and setting default values. Restricting the data type for each column is very important and helps maintain data integrity. For example, it can prevent us from accidentally entering an email address into a field for storing the current date.

  1. Save the table by selecting File > Save Table_1:
  2. When prompted, name your table:

Your New Table

Now that you’ve created a new table, it will appear under your database in the “Tables” section.
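
The equivalent CREATE TABLE script, assuming the column names used in the screenshots (the table name Individual is an assumption), looks roughly like this:

CREATE TABLE dbo.Individual
(
    IndividualId INT IDENTITY(1,1) NOT NULL,        -- auto-number column
    FirstName    NVARCHAR(255) NULL,
    LastName     NVARCHAR(255) NULL,
    DateCreated  DATETIME NULL DEFAULT (GETDATE())  -- current date for each new record
);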

 

 

 

SQL Server – Adding Data

Editing Table Rows

We can use the “Edit Top 200 Rows” option to add data to our table.

  1. To use this option, right click on the table you wish to open, and select “Edit Top 200 Rows”:
  2. You can now start entering the data directly into your table.

Note that you don’t need to enter data into the IndividualId and DateCreated columns. This is because they will be populated automatically (remember, we set IndividualId to “Is Identity” and DateCreated to default to “GetDate()”).

Disadvantages of Entering Data Directly to your Table

The above method is fine if you only have a small amount of data to enter or update. If you have a lot of data to enter, this could become very tedious. Also, if you have multiple environments (for example, a development environment, staging environment, and production environment), with duplicate databases configured in each environment, you will need to re-enter the same data into each environment.

When you’re first learning SQL Server, this may not be a major concern. However, in an environment such as described, entering data directly into the table becomes quite inefficient.

A Better Method – SQL Scripts

In most cases, you will probably find it more efficient to write a SQL script. Using a script enables you to re-enter the data as many times as you like. If you need to rebuild your database for any reason, you can simply run your script to enter the data. If you have multiple environments, once again you can run your script against each environment.

Once you get used to writing and running scripts, you will probably find it quicker than entering data directly into the table.
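
For example, a small insert script for the Individual table sketched earlier could look like this (the names are purely illustrative). Note that IndividualId and DateCreated are omitted because they are populated automatically:

INSERT INTO dbo.Individual (FirstName, LastName)
VALUES ('Homer', 'Brown');

INSERT INTO dbo.Individual (FirstName, LastName)
VALUES ('Ozzy', 'Osbourne');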

Database Driven Websites

When you create a database driven website, you actually write SQL scripts to insert data into the database. You also use SQL scripts to read data, update data, and delete data. These SQL scripts can be placed directly into your website code (PHP, ColdFusion etc), or they can be stored in the database as Stored Procedures or Views.

 

 

 

SQL Server – SQL Scripts

 

Create a New Query

Before we generate our SQL script, we need somewhere to enter it into. This part is easy. Just click the “New Query” button:

A blank, white sheet should now appear on the right pane.

Write/Run Your SQL Script

You are now ready to write SQL queries against your database. You can use this interface to create database objects (such as databases, tables, views etc), insert data into a database table, select data, update data, delete data.

To run an SQL query:

  1. Type your query into the workspace on the right pane
  2. Click “Execute” (you can also press F5)

The following screenshot shows an example of using a SQL ‘select’ statement to select data from a database:
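
A query of that kind, assuming the Individual table from the earlier lessons, would look something like this:

SELECT IndividualId, FirstName, LastName, DateCreated
FROM dbo.Individual;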

As you can see, the results of the query are displayed in the bottom pane.

As a shortcut, you can click on a table in the left pane and drag it to the right pane when building your query. This can save you time – especially if you have many tables to add to your query.

The above ‘select’ statement is an example of a SQL query. Apart from the occasional example, SQL queries are outside the scope of this tutorial. If you’d like to learn more about writing SQL queries, check out the SQL tutorial.

Database Administration Tasks

Most of the database administration tasks that can be performed in SSMS via the graphical user interface can be performed programmatically via SQL scripts. This tutorial concentrates on using the graphical user interface, mainly because it’s usually a lot easier for new users to get their head around. Once you become more familiar with SQL Server, you may find yourself using SQL scripts to perform many of the tasks that you started out doing via the graphical user interface.

 

 

SQL Server – Query Designer

 

About The Query Designer

The graphical query designer is a graphical user interface that allows you to build queries to run against your SQL Server database. This can be particularly useful when building complex queries that involve many tables, views, etc.

The query designer can also be beneficial for those who are learning how to write SQL. This is because you don’t need to remember the SQL syntax in order to write queries against your database – the query designer generates the SQL for you.

Building Your Queries

To build a query with the query designer:

  1. Select Query > Design Query in Editor…:
  2. Add the tables you want to run the query against. In this case, we only have one table to choose from.
  3. Select the column/s you want to display in your query:
  4. Click “OK”

Once you’ve clicked OK, you will find the query has been added to your workspace. You can then run it as you would any other query.

 

 

SQL Server – Views

 

In SQL Server, a view is a pre-written query that is stored on the database. A view consists of a SELECT statement, and when you run the view, you see the results of it like you would when opening a table. Some people like to think of a view as a virtual table. This is because a view can pull together data from multiple tables, as well as aggregate data, and present it as though it is a single table.

Benefits of Views

A view can be useful when there are multiple users with different levels of access, who all need to see portions of the data in the database (but not necessarily all of the data). Views can do the following:

  • Restrict access to specific rows in a table
  • Restrict access to specific columns in a table
  • Join columns from multiple tables and present them as though they are part of a single table
  • Present aggregate information (such as the results of the COUNT function)

Accessing Views

Any view that you create ends up being located under the “Views” folder of your database.

The following screenshot shows a number of views that are located within the “AdventureWorks2008” database:

Creating a View

You create a view by using the CREATE VIEW statement, followed by the SELECT statement.

CREATE VIEW ViewName AS
SELECT ...

Example:

CREATE VIEW "Alphabetical list of products" AS
SELECT Products.*, Categories.CategoryName
FROM Categories INNER JOIN Products ON Categories.CategoryID = Products.CategoryID
WHERE (((Products.Discontinued)=0))

Modifying a View

You can modify an existing view by using ALTER instead of CREATE.

Example:

ALTER VIEW "Alphabetical list of products" AS
SELECT Products.*, Categories.CategoryName
FROM Categories INNER JOIN Products ON Categories.CategoryID = Products.CategoryID
WHERE (((Products.Discontinued)=0))

You can also right click on the view and select “Design”.

Running a View

You run a view by using a SELECT statement.

SELECT TOP 1000 * 
FROM [AdventureWorks2008].[Sales].[vIndividualCustomer]

You can also right-click on the view and select “Select Top 1000 Rows”.

Running the above view results in this:

As you can see, it looks just like you’ve selected rows from a table. The difference is that each column could potentially come from a different table.

 

 

SQL Server – Stored Procedures

 

Stored procedures are a powerful part of SQL Server. They can assist programmers and administrators greatly in working with the database configuration and its data.

A stored procedure is a precompiled group of Transact-SQL statements, and is saved to the database (under the “Stored Procedures” node). Programmers and administrators can execute stored procedures either from the SQL Server Management Studio or from within an application as required.

Transact-SQL, which is based on SQL (Structured Query Language), is the programming language used to interface between applications and their databases. Transact-SQL is a relatively easy language to learn and I highly recommend becoming familiar with it.

Benefits of Stored Procedures

Here are some key benefits in using stored procedures:

Benefit Explanation of benefit
Modular programming You can write a stored procedure once, then call it from multiple places in your application.
Performance Stored procedures provide faster code execution and reduce network traffic.

  • Faster execution: Stored procedures are parsed when they are created, and their execution plans are compiled and cached in memory on first use. This means they can execute a lot faster than sending many lines of SQL code from your application to the SQL Server, which forces SQL Server to compile and optimize your SQL code every time it runs.
  • Reduced network traffic: If you send many lines of SQL code over the network to your SQL Server, this will impact network performance. This is especially true if you have hundreds of lines of SQL code and/or lots of activity on your application. Running the code on the SQL Server (as a stored procedure) eliminates the need to send this code over the network. The only network traffic will be the parameters supplied and the results of any query.
Security Users can execute a stored procedure without needing to execute any of the statements directly. Therefore, a stored procedure can provide advanced database functionality for users who wouldn’t normally have access to these tasks, but this functionality is made available in a tightly controlled way.

Creating a Stored Procedure

You create stored procedures in the SQL Server Management Studio using the CREATE PROCEDURE statement, followed by the code that makes up the stored procedure.

CREATE PROCEDURE StoredProcedureName AS
...

The following code creates a stored procedure called “MyStoredProcedure”:

CREATE PROCEDURE MyStoredProcedure AS
SET ROWCOUNT 10
SELECT Products.ProductName AS TenMostExpensiveProducts, Products.UnitPrice
FROM Products
ORDER BY Products.UnitPrice DESC

Once you run this code in the SQL Server Management Studio, the stored procedure is created and appears under the “Stored Procedures” node.

Modifying a Stored Procedure

If you need to modify an existing stored procedure, you simply replace the CREATE with ALTER.

ALTER PROCEDURE MyStoredProcedure AS
...

Running a Stored Procedure

You can run a stored procedure by using EXECUTE or EXEC. For example, to run the above stored procedure, type the following:

EXEC MyStoredProcedure

If the stored procedure has spaces in its name, enclose the name in double quotes:

EXEC "My Stored Procedure"

If your stored procedure accepts any parameters, they are placed after the procedure name:

EXEC MyStoredProcedure @ParameterName = 'MyParameter'

So, here’s an example:

EXEC SalesByCategory @CategoryName = 'Beverages'

Using The GUI

You can also use the graphical user interface to initiate the execution of a stored procedure.

To initiate a stored procedure this way:

  1. Navigate to the stored procedure
  2. Right click on the stored procedure and select “Execute Stored Procedure…”:
  3. A dialog will appear. Enter your chosen parameter values etc:
  4. Click “OK”
  5. SQL Server will now generate the SQL code and execute the stored procedure:

Parameters

A parameter is a value that your stored procedure uses in order to perform its task. When you write a stored procedure, you can specify the parameters that need to be supplied by the user. For example, if you write a stored procedure to select the address details of an individual, your stored procedure needs to know which individual to select. In this case, the user can provide an IndividualId or UserId to tell the stored procedure which individual to look up.
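As a minimal sketch, a stored procedure that accepts such a parameter might look like this (the Individual table and its columns are assumed for illustration):

CREATE PROCEDURE GetIndividualAddress
    @IndividualId INT
AS
SELECT FirstName, LastName, HomeAddress
FROM Individual
WHERE IndividualId = @IndividualId

You would then call it with EXEC GetIndividualAddress @IndividualId = 1.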

System Stored Procedures

SQL Server includes a large number of system stored procedures to assist in database administration tasks. Many of the tasks you can perform via SQL Server Management Studio can be done via a system stored procedure. For example, some of the things you can do with system stored procedures include:

  • configure security accounts
  • set up linked servers
  • create a database maintenance plan
  • create full text search catalogs
  • configure replication
  • set up scheduled jobs
  • and much more.
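For example, a few system stored procedures that ship with SQL Server and come up in day-to-day administration:

EXEC sp_helpdb                 -- list databases with size and owner
EXEC sp_help 'Individual'      -- describe a table or other object
EXEC sp_who                    -- show current sessions and their status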

SQL Server – User Logins

 

SQL Server allows for the creation of user logins. Each individual who needs access to SQL Server can be given their own user account.

When the administrator configures these user logins, he/she can assign them to any number of roles and schemas, depending on the access that the individual is entitled to.

In this lesson, we will walk through the steps in creating a user login.

To Create a New User Login

  1. Using SQL Server Management Studio, expand the “Security” option and right click on “Logins”
  2. Click on “New Login”
  3. Complete the login properties in the “General” tab by providing a name for the login, choosing the Authentication method (providing a password if you choose “SQL Server authentication”), and selecting the database to use as a default. If you don’t choose a language, it will use the default for the current installation of SQL Server.

If you get an error that reads “The MUST_CHANGE option is not supported by this version of Microsoft Windows”, simply uncheck the “User must change password at next login” option. The error occurs because your operating system doesn’t support this option.

  4. Click the “Server Roles” tab if you need to apply any server-wide security privileges.
  5. Click the “User Mapping” tab to specify which databases this user account is allowed to access. By default, the login will be assigned to the “Public” role, which provides the login with basic access. If the login needs more access in one or more databases, it can be assigned to another role with greater privileges.

Note that these roles are “Database Roles” and are different to the server roles in the previous tab. Server roles are for administering the SQL Server. Database roles are created within each database and specify what the login can do within that database.
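The same login can also be created in T-SQL. A minimal sketch, assuming SQL Server authentication and AdventureWorks2008 as the default database (the name and password are placeholders):

CREATE LOGIN Homer
    WITH PASSWORD = 'StrongPassword1!',
    DEFAULT_DATABASE = [AdventureWorks2008]
GO
USE [AdventureWorks2008]
GO
-- map the login to a database user so it can access this database
CREATE USER Homer FOR LOGIN Homer
GO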

 

 

SQL Server – Server Roles

 

When creating a new user login in SQL Server, you get the option of assigning the login one or more server roles.

Server roles (not to be confused with database roles) are available for various database administration tasks. Not everyone should be assigned to a server role. In fact, only advanced users such as database administrators should be assigned a server role.

Accessing the Server Roles

To access the server roles in SQL Server Management Studio, expand the Security folder:

You view the properties of a server role by right clicking on it. You can then add users to the server role by clicking Add. In the screenshot below, Homer has been added to the securityadmin role.
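The same assignment can be made in T-SQL with the sp_addsrvrolemember system procedure:

EXEC sp_addsrvrolemember @loginame = 'Homer', @rolename = 'securityadmin'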

Explanation of Server Roles

Here’s an explanation of the server roles defined in SQL Server 2008 during setup:

Server Role Description
sysadmin Can perform any task in SQL Server.
serveradmin Can set server-wide configuration options, can shut down the server.
setupadmin Can manage linked servers and startup procedures.
securityadmin Can manage logins and database permissions, read logs, change passwords.
processadmin Can manage processes running in SQL Server.
dbcreator Can create, alter, and drop databases.
diskadmin Can manage disk files.
bulkadmin Can execute BULK INSERT statements.
public Every SQL Server user account belongs to this server role. When a server principal has not been granted or denied specific permissions on a securable object, the user inherits the permissions granted to public on that object. Only assign public permissions on an object when you want the object to be available to all users.

As you can see, some of these roles allow very specific tasks to be performed. If you don’t have many technical users, it’s likely that you’ll only use one or two of these roles (including sysadmin).

SQL Server – Database Schemas

 

Ever since SQL Server 2005 was released, each object in a database has belonged to a database schema. SQL Server 2008 has continued with database schemas, and an explanation follows.

What is a Database Schema?

A database schema is a way to logically group objects such as tables, views, stored procedures etc. Think of a schema as a container of objects.

You can assign a user login permissions to a single schema so that the user can only access the objects they are authorized to access.

Schemas can be created and altered in a database, and users can be granted access to a schema. A schema can be owned by any user, and schema ownership is transferable.

Creating a Database Schema

To create a database schema in SQL Server 2008:

  1. Navigate to Security > Schemas
  2. Right click on Schemas and select New Schema…. Like this:
  3. Complete the details in the General tab for the new schema. In this example, the schema name is “person” and the schema owner is “Homer”.
  4. Add users to the schema as required and set their permissions:
  5. Add any extended properties (via the Extended Properties tab)
  6. Click OK.
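For reference, the T-SQL equivalent of these steps is a single statement:

CREATE SCHEMA person AUTHORIZATION Homer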

Add a Table to the New Schema

Now that we have a new schema, we can add objects such as tables, views, and stored procedures to it. For example, we could transfer the table that we created in the earlier lesson to the new schema.

When we created that table (called “Individual”), it was created in the default database schema (“dbo”). We know this because it appears in our object browser as “dbo.Individual”.

To transfer the “Individual” table to the “person” schema:

  1. In Object Explorer, right click on the table name and select “Design”:
  2. From Design view, press F4 to display the Properties window.
  3. From the Properties window, change the schema to the desired schema:
  4. Close Design View by right clicking the tab and selecting “Close”:
  5. Click “OK” when prompted to save

Your table has now been transferred to the “person” schema.
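The same transfer can be scripted with ALTER SCHEMA:

ALTER SCHEMA person TRANSFER dbo.Individual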

Confirm your Change

To confirm the change:

  1. Refresh the Object Browser view:
  2. You will now see that Object Browser displays the new schema for the table (person.Individual):

 

 

SQL Server – Linked Servers

If you’ve been navigating around SQL Server Management Studio (SSMS), you may have come across the Linked Servers option.

The Linked Servers option allows you to connect to another instance of SQL Server running on a different machine, perhaps remotely in a different city or country. This can be useful if you need to perform distributed queries (i.e., query a remote database). Setting up a linked server is quite straightforward in SSMS; all you need are the details of the remote server and the database that you need to query.

Creating a Linked Server

To create a linked server:

  1. Navigate to Server Objects > Linked Servers
  2. Right click on Linked Servers and select New Linked Server…. Like this:
  3. Complete the details for the linked server. In this example, Data source refers to the name of the SQL Server machine (“Barts_database_server”) and Catalog refers to the name of the database (“Barts_database”). You can also configure options in the other two tabs depending on your requirements.
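Linked servers can also be created with the sp_addlinkedserver system procedure. A minimal sketch using the example names above (when @srvproduct is 'SQL Server', the @server value must be the network name of the remote instance):

EXEC sp_addlinkedserver
    @server = N'Barts_database_server',
    @srvproduct = N'SQL Server'

You would typically follow this with sp_addlinkedsrvlogin to configure how local logins map to logins on the remote server.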

Distributed Queries

Once you have configured your linked server, you will be able to run queries etc against it. When you run a query against a linked server, it is referred to as a distributed query.

When you execute a distributed query against a linked server, you must include a fully qualified, four-part table name for each data source to query. This four-part name should be in the form linked_server_name.catalog.schema.object_name.

Here’s an example:

Barts_database_server.Barts_database.Person.Enemy

This example is based on the linked server example above. It assumes that the remote database has a schema called “Person” and a table called “Enemy”.
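In a query, the four-part name is used like any other table reference. For example:

SELECT * FROM Barts_database_server.Barts_database.Person.Enemy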

 

 

SQL Server Integration Services (SSIS)

 

SQL Server Integration Services (SSIS) allows you to integrate smoothly with other applications and data sources.

You can use Integration Services to copy or download files, send e-mail messages in response to events, update data warehouses, clean and mine data, and manage SQL Server objects and data.

In SQL Server 2005 and higher, Data Transformation Services (DTS) is no longer installed with SQL Server. All DTS-related tasks are now performed with SSIS.

Where Has DTS Gone?

If you have been working with earlier versions of SQL Server (SQL Server 2000 and earlier), you are probably familiar with DTS. DTS allowed you to transfer data between disparate sources into one or more destinations.

Well, starting from SQL Server 2005, DTS has been deprecated. DTS is no longer included with your SQL Server installation.

If you need DTS-type functionality in SQL Server 2008, you will need to use SQL Server Integration Services.

Backwards Compatibility

For backwards compatibility, Microsoft has provided installation files that provide run-time support for DTS packages. This will be useful if you have already created DTS packages in an earlier version of SQL Server.

SQL Server Express & SSIS

SQL Server Integration Services is not available in the Express or Workgroup editions of SQL Server. If you need to use Integration Services, you need at least SQL Server Standard edition. And, if you need to use advanced features (such as data mining, text mining and cleansing or fuzzy transforms), you will need SQL Server Enterprise edition.

SQL Server – Summary

 

SQL Server is a powerful application, and this tutorial has provided an overview of the main concepts involved in administering SQL Server 2008 Express.

To recap briefly, we’ve:

  • Created databases
  • Created database tables
  • Inserted data into those tables
  • Used the SSMS to run a query
  • Created a login account
  • Looked at server roles
  • Learned about database schemas
  • Looked at linked servers
  • and more

What Next?

There were a couple of areas we didn’t touch on during this tutorial. This is mainly due to the fact that it is aimed at the free SQL Server Express version, which has its limitations.

All is not lost though. My SQL Server 2000 tutorial covers some of the areas that weren’t included here. For example, you can learn about the SQL Server Agent for scheduling jobs to run automatically. You can also read about the SQL Profiler, which allows you to monitor events within your SQL Server.

The most logical next step from learning SQL Server is to learn SQL itself (if you haven’t already done so). SQL stands for Structured Query Language and enables you to perform database-related tasks programmatically. Most of the tasks in this tutorial can be done programmatically using SQL.

Probably the most common use for SQL is to work with the data in the database. For example, to insert data, select data, update data, or to delete data. Any database driven website will have many SQL statements doing this stuff.

 

Log Shipping Manual Failover Steps


 

——————————————————————————————————————-

                                    LOG Shipping Failover (Disaster Recovery) steps

——————————————————————————————————————–

Step 1:

——————

Try to back up the tail of the transaction log on the primary server with the NORECOVERY option, i.e., perform a transaction log backup if the primary is still accessible.

BACKUP LOG DBName TO DISK = 'Local path or network path' WITH NORECOVERY -- (A)

Note: Verify that the last backup file has been taken; use the T-SQL below to get the backup information and verify the backups by their LSNs.

Query:

USE MSDB

SELECT * FROM BACKUPSET

Otherwise, execute the T-SQL below on the secondary server to bring the secondary online. Verify that the last and latest log backup file has been restored, checking it against the LSN of what was backed up.

RESTORE DATABASE DBName WITH RECOVERY -- (C)

Query:

USE MSDB

SELECT * FROM RESTOREHISTORY

Step 2:

———-

If you were able to perform (A) in Step 1, then proceed with (B) below to bring the secondary database online from its read-only state.

If you were only able to perform (C) in Step 1, go to Step 3.

RESTORE LOG DBName FROM DISK = 'Local path or network path' WITH RECOVERY -- (B)

Step 3:

———-

The syslogins and sysusers tables on the primary and secondary servers should be in sync; otherwise, the database users and other application users from the primary will not be able to log in to SQL Server or into the database on the secondary server after failover occurs.

Two ways to accomplish the above task:

1)

Fixing Orphaned Users and Logins which are not in sync with production (primary):

—————————————————————————————————-

Create the highly critical application users’ logins on the secondary server, matching the primary, just before configuring log shipping. Use the stored procedures below to resolve orphaned users.

Finding Orphaned users:

USE <DB Name>

GO

sp_change_users_login @Action = 'report'

GO

Fixing Orphaned users:

USE <DB Name>

GO

sp_change_users_login @Action = 'update_one', @UserNamePattern = '<username>', @LoginName = '<login name>'

GO

2)

 

Back up the master..syslogins table to a text file. This file will be used to synchronize syslogins with sysusers on the secondary when failover occurs. This can be done as follows at the command prompt:

C:\any path> bcp master..syslogins out "C:\any path\syslogins.dat" -N -S servername -T

You can schedule it as a SQL job to run on a specific schedule, preferably every hour.

You can make use of sp_resolve_logins to resolve the old primary server’s logins on the new primary server.

sp_resolve_logins [ @dest_db = ] 'dest_db'
    , [ @dest_path = ] 'dest_path'
    , [ @filename = ] 'filename'

Note: Refer to BOL for each parameter.

Step 4:

———–

Disable the log shipping jobs on the primary and secondary servers once failover occurs.

Step 5:

———–

Once failover occurs, the original secondary server is configured as the primary, and log shipping is reconfigured from this new primary server (the original secondary) to the original primary (now the secondary).

Step 6:

———–

When you want to revert to the original log shipping configuration, i.e., the original primary as primary and the original secondary as secondary, you need to take a full backup on the new primary server (the original secondary), restore it on the original primary, and reconfigure log shipping from the original primary to the original secondary.

———————————————————————————————————-

                         Role Change in Transactional Log Shipping, SQL Server 2005

———————————————————————————————————-

Step 1:

——————

Copy any uncopied backup files from the backup share of the original primary server to the copy destination folder of the original secondary server.

Step 2:

——————

Apply any unapplied transaction log backups that were copied in Step 1, in sequence, to the secondary database.

Step 3:

——————

If the original primary server instance is not damaged and is still accessible, back up the tail of the transaction log of the primary database using WITH NORECOVERY. This leaves the database in the restoring state, and eventually you will be able to roll this database forward by applying transaction log backups from the replacement primary database (i.e., the original secondary server).

Step 4:

——————

After the secondary servers are synchronized, you can fail over to the secondary by recovering its secondary database and redirecting clients to that server instance. Recovering puts the database into a consistent state and brings it online. Execute the command below.

-- To bring the original secondary DB online with the last transaction log copied to the secondary server

RESTORE LOG DBName FROM DISK = 'Path of the last tran log copied into secondary' WITH RECOVERY

If Step 3 is not possible, i.e., if you cannot access the original primary server, just execute:

-- To bring the original secondary DB online if no transaction logs are available

RESTORE DATABASE DBName WITH RECOVERY

The above command will bring the original secondary online from read-only mode. Your original secondary has now become the primary server, and you can repoint your applications to it. Later, when your original primary server is up and running again, you will see that the original primary DB is in the restoring state (because of Step 3), and hence transaction logs can be applied from the new primary.

Step 5:

—————-

Disable the log shipping backup job on the original primary server, and the copy and restore jobs on the original secondary server.

Step 6:

—————

On your original secondary (the new primary), reconfigure log shipping, including the steps below:

  • Use the same share for creating backups that you created for the original primary server.
  • When adding the secondary database, in the Secondary Database Settings dialog box, enter the name of the original primary database in the Secondary database box.
  • In the Secondary Database Settings dialog box, select “No, the secondary database is initialized”, because the database is already in the restoring state and so transaction logs can be applied.

Step 7:

—————

Enable the log shipping backup job on the secondary server (the new primary server), and the copy and restore jobs on the primary server (the new secondary server).

SQL Server Clustering and Tips


Tips for SQL Server Cluster:
 
 
A server cluster allows you to connect a number of physical servers, or nodes, that act as failover partners for each other. The redundancy a cluster provides spells more uptime for your critical operations. I’ve implemented lots of clusters in my 13 years working with SQL Server™, and each had its own set of issues. That experience allowed me to gather a number of tips that can help make your clustering efforts easy and successful.
Server clusters take advantage of the built-in clustering capabilities of the Enterprise Editions of the Windows Server® family. In fact, for clustering purposes, Windows Server 2003 is significantly better than Windows 2000 Advanced Server. To maximize the benefits you’ll gain from clustering, you need the right hardware, and that involves some expense.
It’s not sufficient to slap a couple of servers together with a shared disk, and you can’t rely on the fact that individual hardware components may be in the Windows® Catalog (formerly known as the Hardware Compatibility List). The system as a whole must be in the Windows Catalog. But don’t worry; there are some approved, lower-cost cluster solutions available.
Figure 1 shows a typical cluster configuration.
Figure 1: A typical cluster
Of course, there’s more to clustering than hardware; you also need to choose the right edition of SQL Server 2005. The Enterprise Edition enables clustering, as well as other useful features, such as the ability to leverage more CPUs, distributed and updateable partitioned views, built-in log shipping, and automatic use of indexed views. If you already have an Enterprise Edition license, you should consider clustering, whether or not you have the two to eight servers necessary to form a traditional cluster (we’ll talk about one-node clusters in a minute). If you have SQL Server 2005 Standard Edition, you can install a two-node cluster.
Windows Server 2003 Enterprise and Datacenter Editions come with clustering built in. All you have to do is run Cluster Administrator to set up your cluster. You can add all nodes at once or one at a time. Similarly, when you install SQL Server, you can elect to install on an individual, non-clustered server, or you can install a virtual instance on a cluster. If you elect to install a virtual instance, you can install on all nodes of the cluster, just some nodes, or even only one node.
Finally, to achieve the true aim of clustering, high availability, you need qualified people and well-rehearsed procedures to follow if bad things happen. Although clustering is good insurance against hardware failure, it doesn’t prevent user errors, such as the dropping of a critical table by a sleep-deprived DBA in the middle of the night.
 
 
 
One-Node Clusters
 
Even if all you need at the moment is a single server, consider creating a one-node cluster. This gives you the option of upgrading to a cluster later, thus avoiding a rebuild. Just be sure that the hardware you choose is on the cluster portion of the Windows Catalog.
It’s not merely for high availability that you’d want the option to add a node at a later date. Consider what happens if you find that your server just doesn’t have the necessary capacity. That translates to a migration, and that takes time and effort. If you have a one-node cluster, migration is easier, with far less downtime. You add the new node to the cluster, add the SQL Server binaries and service packs to the new node, and then fail over to the new node. Then you add any post-service pack updates and, finally, evict the old node. The downtime is only the time it takes to fail over and add the updates (if any).
 
 
 
Adding Nodes
 
Since all nodes in a cluster must be the same, you’ll want to act sooner, rather than later, to get that extra node. If you wait too long, the node may go out of production. On one project, I had to rebuild a node in a SQL Server 2000 cluster. I had the OS/network admin handle the basic machine build, then I jumped in to add it back to the cluster and prepare it for service as a SQL Server node. All went well until I failed over to the new node. Much to my dismay, it failed right back. To make a long story short, although I had prepared a detailed document on building a new cluster, including adding the cluster service and SQL Server service accounts to both nodes, the document wasn’t followed explicitly. The admin didn’t add those service accounts to the rebuilt node, so the privileges they had before the rebuild no longer existed.
It took me a long time to track that one down. One day it occurred to me to look at local group membership. Once I added the two accounts, failover went smoothly. Then I got to thinking. Rebuilding a node is something you don’t do frequently and, if you do, it’s an emergency. Although I had a document in place, it wasn’t used. We could have automated the security part by simply writing a brief script to add those two accounts and make any other necessary customizations. Things have improved in SQL Server 2005, though. The installer requires you to set domain-level groups for the SQL Server service accounts.
Of course, this got me thinking even more. You can create scripts that invoke CLUSTER.EXE to add the node to your Microsoft® Cluster Server (MSCS) cluster. All you have to do is feed the script the name of the node and it can handle the rest. In an emergency, automation is really your friend.
 
 
 
N+1 Clusters
 
Sometimes, the reason for adding a node to a cluster isn’t that you’re replacing a node. You could be adding more SQL Server instances to your cluster, and each instance needs separate disk resources. Though multiple instances can run on a single node, they would be sharing CPU and RAM, and that could spell poor performance. Ideally, only a single instance should run on a single node. How do you ensure that when you fail over? Simple: one node has no services running on it, while the other nodes each run one SQL Server instance. In fact, that’s the definition of an N+1 cluster: N instances running on N+1 nodes. The extra node is the backup.
 
 
 
Upgrading SQL Server
 
Upgrading a clustered instance of SQL Server is not for the faint of heart: it’s clustered for a reason, and you need uptime. But SQL Server 2005 offers a number of enhancements you want to take advantage of, so if and when you’ve got to do it, you can proceed without a lot of downtime.
What are your choices? Let’s look at the most expensive solution first: creating a whole new cluster, which means new servers and perhaps a new storage area network (SAN). You can probably keep the existing network switches, but that’s about all. Obviously, this approach isn’t cheap, but it has advantages. New hardware generally performs much better than old, with disk capacity and speed increasing at an ever-growing rate. Thus, you’ll get a performance boost with new hardware alone. You may even want to lease your equipment just to stay ahead of the curve.
Once you have the hardware in place, you can create your new virtual SQL Server on this setup, copy your production databases over, and then put the new system through its paces, leaving plenty of time to shake out the bugs before cutover day. Just be sure to script out the logins from your existing server. (Check out support.microsoft.com/kb/246133. It’s also a good idea to update your login build script in case of catastrophic failure.)
To minimize downtime, you’ll likely have to use log shipping, unless your databases are quite small and you have a period of time in which no users are connected. You can log-ship right up to just before cutover. Then, kick the users out, cut and ship the final log, then point the app at the new instance. (Check out the database mirroring section below for an interesting alternative to log shipping.) If you use DNS aliases, you probably won’t even need to point the apps to the new instance. Just update the DNS alias instead. This approach has the advantage that if you get partway through the migration and have to revert to the original, at least you have the original.
You can take a less expensive route, but it requires more advance planning. A cluster can support more than one SQL Server instance, but each instance must have its own disk resources. So when you’re carving up your SAN, set one LUN aside for a future upgrade. To perform the upgrade, install SQL Server binaries on this disk resource. You can exercise the system and, when you’re ready, shut down the current SQL Server, move the disk resources from the old SQL Server group, update the dependencies, and bring the new SQL Server instance online. Attach the databases from the old instance, and you’re up and running. (You did back everything up ahead of time, right?)
That’s the less-expensive approach, and it carries some risk. If something goes bad, you can’t detach the databases from the new instance and put them back. You’re reduced to restoring from backups, and that can mean some serious downtime.
An alternative is putting two instances of SQL Server on your SAN, assuming you have enough space. You restore production backups (and log ship) to the new instance, and proceed much as I described earlier. However, now you have a fallback. Once you’ve done the migration, you can free up the SAN resources from the old instance. It’ll cost you only the price of the extra disks.
Load Balancing
 
Let’s start by debunking a common misconception. You use MSCS clustering for high availability, not for load balancing. Also, SQL Server does not have any built-in, automatic load-balancing capability. You have to load balance through your application’s physical design. What does that mean?
As a table grows, you can expect to see some degradation in performance, particularly when table scans are involved. When you get into the millions or billions of rows, the traditional solution has been to use partitioned views, which are made up of tables with identical schemas hooked together with UNION ALL. CHECK constraints are put in place to differentiate the member tables, and this prevents data duplication across the partitioned view. If the column that is used in the CHECK constraint is also part of the primary key, the view is updatable.
If the member tables are on their own filegroups, you may get better disk performance if the files in those filegroups are on separate physical drives. The tables can even be in separate databases. In SQL Server 2005, however, as long as all of the data is in the same database, you can use table partitioning, which is far easier to implement.
But let’s say you’ve tried your best with table partitioning or (local) partitioned views and still things are slow. If you have SQL Server 2000 or SQL Server 2005, you can use distributed partitioned views. The major difference is that the member tables can reside on different instances of SQL Server and those instances can be installed on an N+1 cluster. Why is this a good idea? If any one member table goes offline in a partitioned view, the entire view goes offline. Making those members part of a cluster then gives you the reliability you need to support performance and provide load balancing.
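As a minimal sketch of the local flavor of this technique (the member tables, their OrderYear column, and the CHECK constraints are hypothetical):

CREATE TABLE Orders_2007 (
    OrderId   INT NOT NULL,
    OrderYear INT NOT NULL CHECK (OrderYear = 2007),
    PRIMARY KEY (OrderId, OrderYear))
CREATE TABLE Orders_2008 (
    OrderId   INT NOT NULL,
    OrderYear INT NOT NULL CHECK (OrderYear = 2008),
    PRIMARY KEY (OrderId, OrderYear))
GO
CREATE VIEW AllOrders AS
SELECT OrderId, OrderYear FROM Orders_2007
UNION ALL
SELECT OrderId, OrderYear FROM Orders_2008

Because OrderYear carries the CHECK constraint and is part of the primary key, the view is updatable, and the optimizer can skip member tables that a query’s predicate rules out. In the distributed case, the member tables simply live on different linked instances.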
Do You Really Need a Cluster?
 
Perhaps you have some spare servers lying around, but they aren’t in the Windows Catalog for clusters. It’s a shame to have to go out and buy new ones just to support a cluster when those servers are available.
Database mirroring may be an attractive alternative to clustering. Mirroring involves three elements: the instance that houses the mirrored database is known as the principal; the backup server is known as the mirror; and, if you want automatic failover, a third server, known as the witness, is required. Briefly, a transaction in a database on the principal gets run again in the mirror. If the principal goes down, the database can fail over to the mirror, automatically if you have a witness. You have to set up mirroring for each of your application databases, and you can’t mirror system databases.
The mirror is a separate instance of SQL Server, unlike in a cluster, and can be located thousands of miles away. Its caches get populated by the update activity that occurs as a result of the transactions duplicated from the principal. This assumes, of course, that there is no activity on the mirror other than receiving the mirrored transactions from the principal. Failover is generally quicker than in a cluster since SQL Server is already running on the mirror. Because the caches are at least partially primed, the initial performance is not as sluggish as it might be in the clustered scenario. And note that when a mirrored database fails over, the roles of principal and mirror are reversed.
The downside of database mirroring is that it needs double the total disk capacity of a cluster. You’ll also need more CPU power if you go for synchronous mode with no data loss. As I said, high availability isn’t cheap.
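For reference, once endpoints are in place and the database has been restored on the mirror WITH NORECOVERY, mirroring itself is enabled per database with ALTER DATABASE ... SET PARTNER; run it on the mirror first, then on the principal. The server names, database name, and port below are placeholders:

-- On the mirror server (database restored WITH NORECOVERY):
ALTER DATABASE SalesDB SET PARTNER = 'TCP://principal_server.example.com:5022'
-- On the principal server:
ALTER DATABASE SalesDB SET PARTNER = 'TCP://mirror_server.example.com:5022'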
A Combined Approach
 
Since a mirror can be located quite remotely from the principal, it’s a good choice for Disaster Recovery (DR) plans. Your cluster can be your first line of defense, but what happens if you employ both clustering and mirroring? In a cluster failover, if you have a witness as part of your mirroring configuration, the mirror will become the principal while the clustered SQL Server is coming online. However, note that failover from the new principal back to the (clustered) new mirror is not automatic. Consequently, it’s better not to enable automatic failover for your mirrored databases when used in conjunction with a cluster.
DR isn’t the only reason you’d use mirroring; it’s also useful if you have to apply a service pack or hotfix to your principal, in which case you can manually fail over to your mirror. While applying the service pack or hotfix, the old principal server is temporarily offline and the committed transactions occurring at the new principal are queued up, waiting to be sent back to the new mirror (old principal). Once the service pack or hotfix installation is complete, synchronization will take place and eventually the two servers will be completely in sync. Now, you can switch the roles of principal and mirror. Downtime was the couple of seconds to fail over and back. You can use this approach to migrate your SQL Server to another box. Just don’t fail back.

 

Virtual Server Adds Flexibility
 
Virtualization allows you to run one or more operating systems concurrently on a single physical server. Virtualization software adds another layer of capabilities to the cluster concept because you can cluster the software. Consequently, if the server on which the host is running fails, then it, and its guest OSs, fail over to a backup node. This could be a very easy way to migrate a guest server. Plus, the guest OS does not have to be cluster-capable. Thus, you could run SQL Server Workgroup Edition inside a guest Windows Server 2003, running on Microsoft Virtual Server 2005 on a cluster.
Indirectly, you have essentially clustered Workgroup Edition (see Figure 2).
Figure 2: Using a virtual server

 

In Control
 
If you’re in charge of a SQL Server implementation, you need to know that your server is always available. Server clustering helps to ensure that’s always the case. This article provides some hard-earned tips to help you get started, and you’ll find more useful information in the “Clustering Resources” sidebar.

High Availability and Disaster Recovery process


What Do The Terms High Availability And Disaster Recovery Mean?

Before we get too deep into this, I figured I would define the terms High Availability and Disaster Recovery since they are quite often used together and sometimes, mistakenly, interchangeably.

High Availability – Protects against hardware/software issues that may cause downtime.  An example could be a bad processor or memory issue.

Disaster Recovery – The ability to recover from a catastrophic event, such as a natural disaster or, on a more local level, a data center disaster (fire, flood, cooling outage, etc) and be able to continue business operations with minimal loss and minimal downtime.

These two concepts really go hand in hand in ensuring business continuity and both should be built into a business continuity solution.

High Availability/Disaster Recovery Options In SQL Server

SQL Server has a number of native features built in to provide some basic measure of high availability/disaster recovery.  These include:

  • Database backups – this is probably the most basic form of disaster recovery for SQL Server and one that should be practiced in every situation, regardless of what other HA/DR solutions are in place.
  • Clustering – this provides a way of binding two or more like Windows servers together in what is known as a cluster.  Each of the servers in the cluster is considered a node and, typically, one node is “active” (processing transactions) and the other nodes are “passive”.  There is a private network that runs between the nodes so that if the active node fails to deliver a “heartbeat” that the other node(s) can detect, an automatic failover is invoked and one of the passive nodes is promoted to active.
    • Pros of Clustering:  Clustering provides redundancy in the case of server hardware failure and provides a fairly quick (within 5 minutes), automatic solution to move processing to another server.
    • Cons of Clustering
      • Does not protect against disk issues since all nodes share the database on the same disk.
      • Only protects against issues with that specific server, not data center-wide since all nodes are located in same data center.
      • Only addresses availability, not disaster recovery
  • Database Mirroring – new in SQL Server 2005, database mirroring offers a way to mirror a database to another server (and disk). All transactions are sent to the mirror server as they are committed on the production server. Depending on how it is implemented, it can automate failover, similar to clustering.
    • Pros of Database Mirroring
      • Provides some form of both HA and DR since mirror can be located in another data center, thus protecting you from hardware failure and disaster.
      • Fast: the mirror is updated virtually instantly.
    • Cons of Database Mirroring
      • Only done at the database level, not the instance level, and only user databases can be mirrored, not system databases. This means that some other form of synchronizing logins and other system database objects has to be devised.
      • To get all features of database mirroring, Enterprise Edition has to be used.
      • Any SQL Agent jobs must be manually enabled on the mirror if a failover takes place.
  • Log Shipping – this is one of the oldest forms of DR available in SQL Server. It involves setting up a warm standby server with a copy of the user database that is to be protected; backups of the transaction log from the production database are periodically shipped to the standby server and applied.
    • Pros of Log Shipping:
      • Tried and true technology that has been around for a long time.
      • At the database level, can provide both HA and DR protection because warm standby can be located in another data center.
    • Cons of Log Shipping:
      • The amount of potential data loss is higher than with the other options, because logs are usually shipped no more frequently than every 5 minutes, and typically more like every 30 minutes to an hour.
      • Failover is fairly manual and time-intensive. It takes longer than the other options to bring the warm standby online.
      • Like database mirroring, this only protects a database, not the entire instance.
      • For SQL Server 2000, this feature is only available in Enterprise Edition.  Available in Standard Edition from SQL Server 2005 forward.
      • Does not transfer non-logged transactions or schema changes (security changes, addition of database objects, etc.).
  • Replication – while not necessarily intended as an HA/DR solution, replication can be used in both scenarios.
    • Pros of Replication:
      • Real-time data updates.  Minimal data loss, if at all.
      • Can be used for both HA/DR as publisher and subscriber can be in different data centers.
    • Cons of Replication:
      • Complicated to set up and maintain.
      • No provided failover mechanism. This has to be created as part of the solution.
      • Again, this is a database-specific solution, not an instance-specific one.

Given that these native solutions were really only database and not instance based, we chose to look at third party options.  The product that we settled on was Double-Take.  While certainly not an easy solution to implement, Double-Take was attractive to us because it allowed us to set up a stand-by server in our hot site for each of our mission critical SQL Servers and then continuously replicate the entire instance to the stand-by server.  It also provides for either automated (if the server stops responding) or manual failover (we have opted for manual) through a control applet that automatically swaps DNS entries between the production and the standby server when a failover is initiated.

Double-Take:  How It Works

Both the production and the standby server have to have the exact same SQL Server configuration (Edition, build, directory structure/location, instances, etc) installed.  The Double-Take software is then installed on both the production and standby server and then, through the Double-Take software, the production server is configured as the source and the standby server is configured as the target.

During the configuration process, you can configure Double-Take to compress the data before it replicates it over the WAN to the stand-by server. This can save a ton of bandwidth and makes sure that the transactions are queued on the target server as quickly as possible, ensuring minimal data loss in the event of a failure.

Additionally, Double-Take will also generate scripts for the failover, failback, and restore of the databases back to the production server when it is back in commission.  These scripts and/or the replication can be customized by overriding the automatically generated rules that Double-Take creates.

Find free space in database files in SQL Server


 

—– Find free space (file size) for a particular Database—–

USE Northwind

SELECT
    a.FILEID,
    CONVERT(decimal(12,2), ROUND(a.size/128.000, 2))                                        AS [FILESIZEINMB],
    CONVERT(decimal(12,2), ROUND(FILEPROPERTY(a.name, 'SpaceUsed')/128.000, 2))             AS [SPACEUSEDINMB],
    CONVERT(decimal(12,2), ROUND((a.size - FILEPROPERTY(a.name, 'SpaceUsed'))/128.000, 2))  AS [FREESPACEINMB],
    a.name      AS [LOGICALFILENAME],
    a.FILENAME  AS [FILENAME]
FROM
    dbo.sysfiles a

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

EXEC sp_spaceused
SELECT fileid, name, filename, size, growth, status, maxsize
FROM dbo.sysfiles WHERE (status & 0x40) <>0
DBCC sqlperf(logspace)
DBCC showfilestats

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/***************************************************************************

if not exists (select * from dbo.sysobjects where id = object_id(N'[dbo].[FileDetails]')
      and OBJECTPROPERTY(id, N'IsUserTable') = 1)
 BEGIN
CREATE TABLE dbo.FileDetails (    
FileId int
, FileGroupId int
, TotalExtents int
, UsedExtents int
, Name nvarchar( 128 )
, FileName nvarchar( 500 )
, TotalSize AS ( ( TotalExtents * 64.0 ) / 1024 )
, UsedSize AS ( ( UsedExtents * 64.0 ) / 1024 )
)   
END
----------------------------------------------------------------------------------------------------

 

exec master..sp_MSforeachdb 'USE ?
INSERT INTO <<mydb>>.dbo.FileDetails (FileId, FileGroupId, TotalExtents, UsedExtents, Name, Filename)
EXEC(''DBCC showfilestats'')'
------------------------------------------------------------------------------------------------------ SELECT * FROM <<mydb>>.dbo.FileDetails
***************************************************************************************************/

USE master 
GO 
 
sp_configure 'show advanced options', 1;
GO
RECONFIGURE;
GO

EXEC sp_configure 'Ole Automation Procedures';
GO

sp_configure 'Ole Automation Procedures', 1;
GO
RECONFIGURE;
GO

sp_configure 'show advanced options', 0;
GO
RECONFIGURE;
GO
--------------------------------------------------------
-- create work tables
--------------------------------------------------------
IF EXISTS (SELECT * FROM DB_UTILS.dbo.sysobjects WHERE name = 'tb_Databases')
        BEGIN
                DROP TABLE DB_UTILS.dbo.tb_Databases
                PRINT 'DROPPED tb_Databases From DB_UTILS'
        END
CREATE TABLE    DB_UTILS.dbo.tb_Databases
        (
        row_id                  INT     IDENTITY(1,1),
        dbname                  SYSNAME,
        db_size                 INT,                            -- Size of database, in kilobytes.
        remarks                 VARCHAR(254)
        )

----------------------------------------------------------
IF EXISTS (SELECT * FROM DB_UTILS.dbo.sysobjects WHERE name = 'tb_DBShowFileStats')
        BEGIN
                DROP TABLE DB_UTILS.dbo.tb_DBShowFileStats
                PRINT 'DROPPED tb_DBShowFileStats From DB_UTILS'
        END

CREATE TABLE    DB_UTILS.dbo.tb_DBShowFileStats
        (
        row_id                  INT     IDENTITY(1,1),
        dbname                  NVARCHAR(255),
        fileid                  INT,
        filegroup               INT,
        totalextents            INT,
        usedextents             INT,
        name                    VARCHAR(255),
        filename                VARCHAR(255)
        )
 
----------------------------------------------------------
IF EXISTS (SELECT * FROM DB_UTILS.dbo.sysobjects WHERE name = 'tb_FixedDriveSpace')
        BEGIN
                DROP TABLE DB_UTILS.dbo.tb_FixedDriveSpace
                PRINT 'DROPPED tb_FixedDriveSpace From DB_UTILS'
        END
CREATE TABLE    DB_UTILS.dbo.tb_FixedDriveSpace
        (
        drive_name      CHAR(1) PRIMARY KEY,
        free_space      DECIMAL(10,3) NULL,
        total_size      DECIMAL(10,3) NULL
        )
         
--------------------------------------------------------
-- create views
--------------------------------------------------------
USE DB_UTILS
GO

IF EXISTS (SELECT * FROM DB_UTILS.sys.views WHERE name = 'vw_FixedDriveSpace')
        BEGIN
                DROP VIEW vw_FixedDriveSpace
                PRINT 'DROPPING View vw_FixedDriveSpace'
        END
GO

CREATE VIEW vw_FixedDriveSpace AS

SELECT  TOP 100 PERCENT
        a.drive_name + ':\'                                     as [Drive],
        STR(SUM(a.total_size), 10, 2)                           as [Drive_Size],
        STR(SUM((a.total_size - a.free_space)), 10, 2)          as [Space_Used],
        STR(SUM(a.free_space), 10, 2)                           as [Space_Free],
        STR((a.free_space * 100 / a.total_size), 10, 2)         as [Pct_Free]
FROM     DB_UTILS.dbo.tb_FixedDriveSpace   as a
GROUP BY a.drive_name, a.free_space, a.total_size
ORDER BY (a.free_space * 100 / a.total_size), a.drive_name
GO
----------------------------------------------------------
IF EXISTS (SELECT * FROM DB_UTILS.sys.views WHERE name = 'vw_DBFreeSpace')
        BEGIN
                DROP VIEW vw_DBFreeSpace
                PRINT 'DROPPING View vw_DBFreeSpace'
        END
GO

CREATE VIEW vw_DBFreeSpace AS

SELECT  TOP 100 PERCENT
        SUBSTRING(a.dbname, 1, 26)                                                      as [Name],
        SUBSTRING(b.name, 1, 26)                                                        as [FileName],
        LEFT(b.filename, 3)                                                             as [Drive],
        STR(SUM((b.totalextents * 64.0) / 1024.0), 10, 2)                               as [DB_File_Size],
        STR(SUM((b.usedextents  * 64.0) / 1024.0), 10, 2)                               as [Space_Used],
        STR(SUM((b.totalextents - b.usedextents)  * 64.0 / 1024.0), 10, 2)              as [Space_Free],
        STR(SUM((((b.totalextents - b.usedextents)  * 64.0) / 1024.0 * 100.0 /
        ((b.totalextents * 64.0) / 1024.0))), 10, 2)                                    as [Pct_Free]
FROM     DB_UTILS.dbo.tb_Databases                        as a
         INNER JOIN DB_UTILS.dbo.tb_DBShowFileStats      as b on a.dbname = b.dbname
GROUP BY a.dbname, b.name, b.filename, b.totalextents, b.usedextents
ORDER BY (((b.totalextents - b.usedextents)  * 64.0) / 1024.0 * 100.0 / ((b.totalextents * 64.0) / 1024.0)),
         a.dbname,
         b.name
GO
 
 
USE DB_UTILS
GO

IF EXISTS (SELECT * FROM DB_UTILS.sys.objects WHERE name = 'bp_DBandServerSpaceReport' AND type in (N'P', N'PC'))
        BEGIN
                DROP PROCEDURE bp_DBandServerSpaceReport
                PRINT 'DROPPING bp_DBandServerSpaceReport'
        END
GO

CREATE PROCEDURE bp_DBandServerSpaceReport AS

SET NOCOUNT ON
-- work variables
------------------
DECLARE @SQLCmd                 NVARCHAR(MAX),
        @Result                 INT,
        @FSO                    INT,
        @DriveNameOut           INT,
        @TotalSizeOut           VARCHAR(20),
        @MB                     NUMERIC

SET     @MB = 1048576

-- clear work tables
--------------------
TRUNCATE TABLE DB_UTILS.dbo.tb_Databases
TRUNCATE TABLE DB_UTILS.dbo.tb_DBShowFileStats
TRUNCATE TABLE DB_UTILS.dbo.tb_FixedDriveSpace
----------------------------------------------------------
-- load database table with database names
-----------------------------------------------------------
SET             @SQLCmd = 'master..sp_databases'
INSERT INTO     DB_UTILS.dbo.tb_Databases (dbname, db_size, remarks) EXEC sp_executesql @SQLCmd

-- loop through databases and load file stats table with information for each database
--------------------------------------------------------------------------------------
DECLARE         @dbname VARCHAR(200)
SET             @dbname = ''

WHILE   @dbname IS NOT NULL
        BEGIN
                SELECT  @dbname = MIN(dbname)
                FROM    DB_UTILS.dbo.tb_Databases
                WHERE   dbname > @dbname

                IF      @dbname IS NOT NULL
                        BEGIN
                                SELECT          @SQLCmd = 'USE [' + @dbname + ']; DBCC SHOWFILESTATS'
                                INSERT INTO     DB_UTILS.dbo.tb_DBShowFileStats (fileid, filegroup, totalextents, usedextents, name, filename) EXEC sp_executesql @SQLCmd

                                UPDATE  DB_UTILS.dbo.tb_DBShowFileStats
                                SET     dbname = @dbname
                                WHERE   dbname IS NULL
                        END
        END

-- load the drive table with free space, then use OLE automation to get each drive's total size
------------------------------------------------------------------------------------------------
INSERT  DB_UTILS.dbo.tb_FixedDriveSpace(drive_name, free_space) EXEC master.dbo.xp_fixeddrives

EXEC    @Result = sp_OACreate 'Scripting.FileSystemObject', @FSO OUT

IF @Result <> 0
        BEGIN
                EXEC sp_OAGetErrorInfo  @FSO
        END

-- loop through and get drive metadata for each drive on the server
-------------------------------------------------------------------
DECLARE         @drive_name VARCHAR(50)
SET             @drive_name = ''

WHILE   @drive_name IS NOT NULL
        BEGIN
                SELECT  @drive_name = MIN(drive_name)
                FROM    DB_UTILS.dbo.tb_FixedDriveSpace
                WHERE   drive_name > @drive_name

                IF      @drive_name IS NOT NULL
                        BEGIN
                                -- get drive information
                                ------------------------
                                EXEC    @Result = sp_OAMethod @FSO, 'GetDrive', @DriveNameOut OUT, @drive_name

                                -- error handling
                                -----------------
                                IF      @Result <> 0
                                        BEGIN
                                                EXEC sp_OAGetErrorInfo @FSO
                                        END

                                -- get drive size
                                -----------------
                                EXEC    @Result = sp_OAGetProperty  @DriveNameOut, 'TotalSize', @TotalSizeOut OUT

                                -- error handling
                                -----------------
                                IF      @Result <> 0
                                        BEGIN
                                                EXEC sp_OAGetErrorInfo  @DriveNameOut
                                        END

                                -- update temp table with values
                                --------------------------------
                                UPDATE  DB_UTILS.dbo.tb_FixedDriveSpace
                                SET     total_size      = @TotalSizeOut / @MB
                                WHERE   drive_name      = @drive_name
                        END
        END

-- destroy the FSO
------------------
EXEC    @Result = sp_OADestroy @FSO

-- error handling
-----------------
IF      @Result <> 0
        BEGIN
                EXEC sp_OAGetErrorInfo  @FSO
        END
GO
 
 
— create procedure that will send mail 
————————————— 
IF  EXISTS (SELECT * FROM DB_UTILS.sys.objects WHERE name = ‘bp_DBandServerSpaceReport_SendEmail’ AND type in (N’P’, N’PC’)) 
        BEGIN 
                DROP PROCEDURE bp_DBandServerSpaceReport_SendEmail 
                PRINT ‘DROPPING bp_DBandServerSpaceReport_SendEmail’ 
        END 
GO 
 
CREATE PROCEDURE bp_DBandServerSpaceReport_SendEmail AS

-- send report
--------------

-- email variables
------------------
DECLARE @EmailAddress           VARCHAR(100),   -- widened from 30 so the placeholder below is not truncated
        @EmailSubject           VARCHAR(200),
        @EmailImportance        VARCHAR(10),
        @EmailQuery             VARCHAR(4000),
        @EmailMessage           VARCHAR(500),
        @EmailFormat            VARCHAR(20),
        @EmailResultsWidth      INT

-- drive space query
--------------------
SELECT  @EmailAddress           = 'NEED TO ENTER YOUR EMAIL ADDRESS HERE',
        @EmailSubject           = 'Database Size Report - ' + @@SERVERNAME,
        @EmailMessage           = 'The System Drive Space Results Are As Follows:'      + CHAR(10) +
                                  '----------------------------------------------',
        @EmailQuery             =
                                'SET NOCOUNT ON;'                                       + CHAR(10) +
                                'PRINT   '''''                                          + CHAR(10) +
                                'SELECT  Drive              as [Drive],'                + CHAR(10) +
                                '        Drive_Size         as [Drive Size (MB)],'      + CHAR(10) +
                                '        Space_Used         as [Space Used (MB)],'      + CHAR(10) +
                                '        Space_Free         as [Space Free (MB)],'      + CHAR(10) +
                                '        Pct_Free           as [Pct. Free]'             + CHAR(10) +
                                'FROM    DB_UTILS.dbo.vw_FixedDriveSpace'               + CHAR(10) +
                                'ORDER BY Pct_Free, Drive'                              + CHAR(10) +
                                'PRINT   '''''                                          + CHAR(10) +
                                'PRINT   '''''                                          + CHAR(10) +
                                'SELECT  ''                                         '' as ''The Database Space Results Are As Follows:''' + CHAR(10) +
                                'SELECT  Name            as [DB Name],'                 + CHAR(10) +
                                '        FileName        as [DB File Name],'            + CHAR(10) +
                                '        Drive           as [Drive],'                   + CHAR(10) +
                                '        DB_File_Size    as [DB File Size],'            + CHAR(10) +
                                '        Space_Used      as [Space Used (MB)],'         + CHAR(10) +
                                '        Space_Free      as [Space Free (MB)],'         + CHAR(10) +
                                '        Pct_Free        as [Pct. Free]'                + CHAR(10) +
                                'FROM    DB_UTILS.dbo.vw_DBFreeSpace'                   + CHAR(10) +
                                'ORDER BY Pct_Free, Name, FileName',

        @EmailFormat            = 'TEXT',
        @EmailImportance        = 'NORMAL',
        @EmailResultsWidth      = 150

-- Send Mail
------------
        EXEC msdb..sp_send_dbmail
                @profile_name           = @@SERVERNAME,   -- assumes a Database Mail profile named after the server
                @recipients             = @EmailAddress,
                @subject                = @EmailSubject,
                @body                   = @EmailMessage,
                @query                  = @EmailQuery,
                @body_format            = @EmailFormat,
                @query_result_width     = @EmailResultsWidth,
                @importance             = @EmailImportance
GO 
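
A quick sanity check before scheduling anything is to list the configured Database Mail profiles (the procedure above assumes one named after the server) and then run the report by hand:

-- list the Database Mail profiles on this server
EXEC msdb.dbo.sysmail_help_profile_sp

-- run the report manually and confirm the mail arrives
EXEC DB_UTILS.dbo.bp_DBandServerSpaceReport_SendEmail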
 
 
-- create SQL job
-----------------
USE [msdb]
GO

IF EXISTS (SELECT job_id FROM msdb.dbo.sysjobs_view WHERE name = N'DB_UTILS - Server and DB Space Check')
        EXEC msdb.dbo.sp_delete_job @job_name = N'DB_UTILS - Server and DB Space Check', @delete_unused_schedule=1
GO 
 
BEGIN TRANSACTION
DECLARE @ReturnCode INT
SELECT  @ReturnCode = 0

IF NOT EXISTS (SELECT name FROM msdb.dbo.syscategories WHERE name=N'Database Maintenance' AND category_class=1)
BEGIN
EXEC @ReturnCode = msdb.dbo.sp_add_category @class=N'JOB', @type=N'LOCAL', @name=N'Database Maintenance'
IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback
END

DECLARE @jobId BINARY(16)
EXEC    @ReturnCode = msdb.dbo.sp_add_job @job_name=N'DB_UTILS - Server and DB Space Check',
        @enabled=1,
        @notify_level_eventlog=2,
        @notify_level_email=2,
        @notify_level_netsend=0,
        @notify_level_page=0,
        @delete_level=0,
        @description=N'Compiles A Size Report On All Drives And All Databases Reporting Space In-Use, Free Space, And Total Space.',
        @category_name=N'Database Maintenance',
        @owner_login_name=N'sa',
        @notify_email_operator_name=N'SqlAdmin',
        @job_id = @jobId OUTPUT
IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback

EXEC    @ReturnCode = msdb.dbo.sp_add_jobstep @job_id=@jobId, @step_name=N'Run DB Space Report',
        @step_id=1,
        @cmdexec_success_code=0,
        @on_success_action=3,
        @on_success_step_id=0,
        @on_fail_action=2,
        @on_fail_step_id=0,
        @retry_attempts=0,
        @retry_interval=1,
        @os_run_priority=0, @subsystem=N'TSQL',
        @command=N'EXEC DB_UTILS.dbo.bp_DBandServerSpaceReport',
        @database_name=N'DB_UTILS',
        @flags=0
IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback

EXEC    @ReturnCode = msdb.dbo.sp_add_jobstep @job_id=@jobId, @step_name=N'Email DB Space Report',
        @step_id=2,
        @cmdexec_success_code=0,
        @on_success_action=1,
        @on_success_step_id=0,
        @on_fail_action=2,
        @on_fail_step_id=0,
        @retry_attempts=0,
        @retry_interval=1,
        @os_run_priority=0, @subsystem=N'TSQL',
        @command=N'EXEC DB_UTILS.dbo.bp_DBandServerSpaceReport_SendEmail',
        @database_name=N'DB_UTILS',
        @flags=0
IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback
        EXEC    @ReturnCode = msdb.dbo.sp_update_job @job_id = @jobId, @start_step_id = 1
IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback
EXEC    @ReturnCode = msdb.dbo.sp_add_jobschedule @job_id=@jobId, @name=N'Weekly',
        @enabled=1,
        @freq_type=8,
        @freq_interval=1,
        @freq_subday_type=1,
        @freq_subday_interval=0,
        @freq_relative_interval=0,
        @freq_recurrence_factor=1,
        @active_start_date=20011115,
        @active_end_date=99991231,
        @active_start_time=41500,
        @active_end_time=235959
IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback
        EXEC @ReturnCode = msdb.dbo.sp_add_jobserver @job_id = @jobId, @server_name = N'(local)'
IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback
        COMMIT TRANSACTION
        GOTO EndSave
QuitWithRollback:
    IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION
EndSave:
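
Once the script completes, you can confirm that the job, its two steps, and the weekly schedule were created as expected:

-- verify the job definition, steps, and schedule
EXEC msdb.dbo.sp_help_job @job_name = N'DB_UTILS - Server and DB Space Check'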

SQL Server Clustering


Introduction

A server cluster is a group of independent servers running Cluster service and working collectively as a single system. Server clusters provide high-availability, scalability, and manageability for resources and applications by grouping multiple servers running Windows® 2000 Advanced Server or Windows 2000 Datacenter Server.

The purpose of server clusters is to preserve client access to applications and resources during failures and planned outages. If one of the servers in the cluster is unavailable due to failure or maintenance, resources and applications move to another available cluster node.

For clustered systems, the term high availability is used rather than fault-tolerant, as fault tolerant technology offers a higher level of resilience and recovery. Fault-tolerant servers typically use a high degree of hardware redundancy plus specialized software to provide near-instantaneous recovery from any single hardware or software fault. These solutions cost significantly more than a clustering solution because organizations must pay for redundant hardware that waits idly for a fault. Fault-tolerant servers are used for applications that support high-value, high-rate transactions such as check clearinghouses, Automated Teller Machines (ATMs), or stock exchanges.

While Cluster service does not guarantee non-stop operation, it provides availability sufficient for most mission-critical applications. Cluster service can monitor applications and resources, automatically recognizing and recovering from many failure conditions. This provides greater flexibility in managing the workload within a cluster, and improves overall availability of the system.

Cluster service benefits include:

  • High Availability. With Cluster service, ownership of resources such as disk drives and IP addresses is automatically transferred from a failed server to a surviving server. When a system or application in the cluster fails, the cluster software restarts the failed application on a surviving server, or disperses the work from the failed node to the remaining nodes. As a result, users experience only a momentary pause in service.
  • Failback. Cluster service automatically re-balances the workload in a cluster when a failed server comes back online.
  • Manageability. You can use the Cluster Administrator to manage a cluster as a single system and to manage applications as if they were running on a single server. You can move applications to different servers within the cluster by dragging and dropping cluster objects. You can move data to different servers in the same way. This can be used to manually balance server workloads and to unload servers for planned maintenance. You can also monitor the status of the cluster, all nodes and resources from anywhere on the network.
  • Scalability. Cluster services can grow to meet rising demands. When the overall load for a cluster-aware application exceeds the capabilities of the cluster, additional nodes can be added.

This paper provides instructions for installing Cluster service on servers running Windows 2000 Advanced Server and Windows 2000 Datacenter Server. It describes the process of installing the Cluster service on cluster nodes. It is not intended to explain how to install cluster applications, but rather to guide you through the process of installing a typical, two-node cluster itself.

********************************************************************************************************

 

What is Clustering?

 
A Microsoft SQL Server Cluster is nothing more than a collection of two or more physical servers with identical access to shared storage that provides the disk resources required to store the database files. 

These servers are referred to as “nodes”.  The nodes talk to one another via a private network, sending a heartbeat signal between them.  Should one node fail to communicate its heartbeat to the other node in the cluster, the surviving node will take ownership of any dependent services being run by the node that lost communication.  This process is referred to as “failover”. 

A failover can occur either automatically (a server’s heartbeat stops communicating) or manually.  A manual failover is beneficial when patching or some other form of maintenance is required at the physical-server level.  You would typically implement clustering to ensure that if you ever encounter hardware failure on the physical server hosting your SQL instance, your databases remain available to dependent applications and their users. 

Unlike other clustering technologies that are implemented for better performance or for increased processing power via load-balancing, SQL clusters are designed to provide highly available databases, eliminating the downtime associated with hardware failure.  This architectural concept is referred to as “High Availability Clustering”, or “HA Clustering” for short.  The service or group of services hosted on a clustered node are referred to as resources and resource groups.  Since these resources must be available to every node in the cluster, they must reside on a shared disk array in the form of SAN-NAS disk.  Each resource group is mapped to a logical drive physically hosted on the shared disk array and also has its own associated IP address and network name. 
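
From the DBA’s side, an easy way to see which physical node currently owns a clustered instance is SERVERPROPERTY (available in SQL Server 2000 SP3 and later). Run it before and after a failover to watch ownership move:

-- physical node currently hosting the instance, and whether it is clustered
SELECT  SERVERPROPERTY('ComputerNamePhysicalNetBIOS')  AS CurrentOwnerNode,
        SERVERPROPERTY('IsClustered')                  AS IsClustered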

The SQL Server Installation Process on a Cluster

 
The SQL Server installation process detects when an installation is being attempted on a clustered node and will prompt you as to whether you wish to configure the SQL instance as clustered or not.  If you proceed with creating a clustered instance of SQL Server, the instance will be hosted on a “Virtual” Server.  Resources such as data and log files will be created on the shared SAN-NAS disk for SQL Server, SQL Server Agent, and Full-Text Indexing.

If selected in the installation process, Notification Services and Analysis Services are also cluster-aware in SQL Server 2005.  The associated program files for the instance, by contrast, are installed on the local drives of each clustered node in an identical fashion, and registry values are set identically across all clustered nodes.  Since the “virtual” server resides solely on the SAN, it can be “owned” by any of the nodes you allow.  Each node can run these resources in identical fashion because each physical server/node has the program files and the identical registry settings necessary to run the SQL instance. 

Furthermore, the users are oblivious to the underlying fluidity of the server.  They connect to it as they would to any other physical server: by server name (the virtual server name in this case) for a default instance, or by virtual server name\instance name for a named instance.  This is key for application connectivity.  Since the SQL instance simply changes ownership during a failover, the connection strings the applications rely on to connect to their databases need not be recoded; the physical server may become unavailable, but the virtual server persists after the failover.
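
You can see this from T-SQL as well: on a clustered instance, @@SERVERNAME returns the virtual server name rather than the physical machine name, so it stays constant across failovers while the physical owner changes:

-- virtual name (stable) versus physical node (changes on failover)
SELECT  @@SERVERNAME                                   AS VirtualServerName,
        SERVERPROPERTY('ComputerNamePhysicalNetBIOS')  AS PhysicalNode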

 

Active/Active or Active/Passive Clustering

 
Clusters are often referred to as either Active/Active or Active/Passive.  Just as the name suggests, in an Active/Active cluster there are two or more nodes, each owning an instance of Microsoft SQL Server.  If one node fails, the instance it owns fails over to the other node, running alongside (and contending for resources with) that node’s own instance.  An Active/Passive architecture requires that, no matter how many nodes make up the cluster, at least one node does not own an instance of SQL Server.  It is “passive” and exists only to accept a failover from a node hosting a SQL instance. 

Current Microsoft licensing policies require you to only license the active nodes running Microsoft SQL Server.  The passive node need not be licensed.

 

How Many Nodes?


Today’s clustering technology under Windows 2003 and Microsoft SQL Server 2005 Enterprise Edition allows up to eight nodes to be combined into a single cluster.  The release of Windows 2008 and Microsoft SQL Server 2008 Enterprise Edition brings the ability to double that to sixteen nodes.  (You are limited to two nodes if you use SQL Server Standard Edition.)  Do you want to cluster multiple databases with various SLAs across many nodes in a single cluster?  Is it beneficial to dedicate a two-node cluster to a single database?  The answer is:  “It Depends.”  We look into this in detail in a future tip.

 

 

Pros and Cons

 
While clustering protects you from hardware failure on the server hosting the SQL Server instance, it does not protect you from media failure.  Unlike replication, database mirroring, or log shipping, there is only a single copy of your database; if the SAN-NAS encounters a failure, you could conceivably incur not only downtime but data loss.  It is recommended that you combine redundancy of your SAN-NAS, or database mirroring, with your clustering configuration to protect against media failures.  Hardware and licensing costs may also be high: in an Active/Passive clustering model you purchase hardware you hope never to use, and the cluster build is more complex than a standalone server setup (the physical build of the cluster is outside the scope of this discussion).  On the plus side, clustering brings a simple SQL installation story and ease of administration and maintenance.

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Setting up Networks

Note: For this section, power down all shared storage devices and then power up all nodes. Do not let both nodes access the shared storage devices at the same time until the Cluster service is installed on at least one node and that node is online.

Each cluster node requires at least two network adapters—one to connect to a public network, and one to connect to a private network consisting of cluster nodes only.

The private network adapter establishes node-to-node communication, cluster status signals, and cluster management. Each node’s public network adapter connects the cluster to the public network where clients reside.

Verify that all network connections are correct, with private network adapters connected to other private network adapters only, and public network adapters connected to the public network. The connections are illustrated in Figure 1 below. Run these steps on each cluster node before proceeding with shared disk setup.

Figure 1: Example of two-node cluster (clusterpic.vsd)

Configuring the Private Network Adapter

Perform these steps on the first node in your cluster.

  1. Right-click My Network Places and then click Properties.
  2. Right-click the Local Area Connection 2 icon. Note: Which network adapter is private and which is public depends upon your wiring. For the purposes of this document, the first network adapter (Local Area Connection) is connected to the public network, and the second network adapter (Local Area Connection 2) is connected to the private cluster network. This may not be the case in your network.
  3. Click Status. The Local Area Connection 2 Status window shows the connection status, as well as the speed of connection. If the window shows that the network is disconnected, examine cables and connections to resolve the problem before proceeding. Click Close.
  4. Right-click Local Area Connection 2 again, click Properties, and click Configure.
  5. Click Advanced. The window in Figure 2 should appear.
  6. Network adapters on the private network should be set to the actual speed of the network, rather than the default automated speed selection. Select your network speed from the drop-down list. Do not use an Auto-select setting for speed. Some adapters may drop packets while determining the speed. To set the network adapter speed, click the appropriate option such as Media Type or Speed.
    Figure 2: Advanced Adapter Configuration (advanced.bmp)

    All network adapters in the cluster that are attached to the same network must be identically configured to use the same Duplex Mode, Flow Control, Media Type, and so on. These settings should remain the same even if the hardware is different.

    Note: We highly recommend that you use identical network adapters throughout the cluster network.

  7. Click Transmission Control Protocol/Internet Protocol (TCP/IP).
  8. Click Properties.
  9. Click the radio-button for Use the following IP address and type in the following address: 10.1.1.1. (Use 10.1.1.2 for the second node.)
  10. Type in a subnet mask of 255.0.0.0.
  11. Click the Advanced button and select the WINS tab. Select Disable NetBIOS over TCP/IP. Click OK to return to the previous menu. Do this step for the private network adapter only. The window should now look like Figure 3 below.


    Figure 3: Private Connector IP Address (ip10111.bmp)

 

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 

Configuring the Public Network Adapter

Note: While the public network adapter’s IP address can be automatically obtained if a DHCP server is available, this is not recommended for cluster nodes. We strongly recommend setting static IP addresses for all network adapters in the cluster, both private and public. If IP addresses are obtained via DHCP, access to cluster nodes could become unavailable if the DHCP server goes down. If you must use DHCP for your public network adapter, use long lease periods to assure that the dynamically assigned lease address remains valid even if the DHCP service is temporarily lost. In all cases, set static IP addresses for the private network connector. Keep in mind that Cluster service will recognize only one network interface per subnet. If you need assistance with TCP/IP addressing in Windows 2000, please see Windows 2000 Online Help.

Rename the Local Area Network Icons

We recommend changing the names of the network connections for clarity. For example, you might want to change the name of Local Area Connection (2) to something like Private Cluster Connection. The naming will help you identify a network and correctly assign its role.

  1. Right-click the Local Area Connection 2 icon.
  2. Click Rename.
  3. Type Private Cluster Connection into the textbox and press Enter.
  4. Repeat steps 1-3 and rename the public network adapter as Public Cluster Connection.
    Figure 4: Renamed connections (connames.bmp)
  5. The renamed icons should look like those in Figure 4 above. Close the Networking and Dial-up Connections window. The new connection names automatically replicate to other cluster servers as they are brought online.

Verifying Connectivity and Name Resolution

To verify that the private and public networks are communicating properly, perform the following steps for each network adapter in each node. You need to know the IP address for each network adapter in the cluster. If you do not already have this information, you can retrieve it using the ipconfig command on each node:

  1. Click Start, click Run and type cmd in the text box. Click OK.
  2. Type ipconfig /all and press Enter. IP information should display for all network adapters in the machine.
  3. If you do not already have the command prompt on your screen, click Start, click Run, and type cmd in the text box. Click OK.
  4. Type ping ipaddress where ipaddress is the IP address for the corresponding network adapter in the other node. For example, assume that the IP addresses are set as follows:
    Node   Network Name                 Network Adapter IP Address
    1      Public Cluster Connection    172.16.12.12
    1      Private Cluster Connection   10.1.1.1
    2      Public Cluster Connection    172.16.12.14
    2      Private Cluster Connection   10.1.1.2

In this example, you would type ping 172.16.12.14 and ping 10.1.1.2 from Node 1, and you would type ping 172.16.12.12 and ping 10.1.1.1 from Node 2.

To verify name resolution, ping each node from a client using the node’s machine name instead of its IP number. For example, to verify name resolution for the first cluster node, type ping hq-res-dc01 from any client.

Verifying Domain Membership

All nodes in the cluster must be members of the same domain and able to access a domain controller and a DNS Server. They can be configured as member servers or domain controllers. If you decide to configure one node as a domain controller, you should configure all other nodes as domain controllers in the same domain as well. In this document, all nodes are configured as domain controllers.

Note: See More Information at the end of this document for links to additional Windows 2000 documentation that will help you understand and configure domain controllers, DNS, and DHCP.

  1. Right-click My Computer, and click Properties.
  2. Click Network Identification. The System Properties dialog box displays the full computer name and domain. In our example, the domain name is reskit.com.
  3. If you are using member servers and need to join a domain, you can do so at this time. Click Properties and follow the on-screen instructions for joining a domain.
  4. Close the System Properties and My Computer windows.

Setting Up a Cluster User Account

The Cluster service requires a domain user account under which the Cluster service can run. This user account must be created before installing Cluster service, because setup requires a user name and password. This user account should not belong to a user on the domain.

  1. Click Start, point to Programs, point to Administrative Tools, and click Active Directory Users and Computers.
  2. Click the + to expand Reskit.com (if it is not already expanded).
  3. Click Users.
  4. Right-click Users, point to New, and click User.
  5. Type in the cluster name as shown in Figure 5 below and click Next.
    Figure 5: Add Cluster User (clusteruser.bmp)
  6. Set the password settings to User Cannot Change Password and Password Never Expires. Click Next and then click Finish to create this user. Note: If your administrative security policy does not allow the use of passwords that never expire, you must renew the password and update the cluster service configuration on each node before password expiration.
  7. Right-click Cluster in the left pane of the Active Directory Users and Computers snap-in. Select Properties from the context menu.
  8. Click Add Members to a Group.
  9. Click Administrators and click OK. This gives the new user account administrative privileges on this computer.
  10. Close the Active Directory Users and Computers snap-in.

Setting Up Shared Disks

Warning: Make sure that Windows 2000 Advanced Server or Windows 2000 Datacenter Server and the Cluster service are installed and running on one node before starting an operating system on another node. If the operating system is started on other nodes before the Cluster service is installed, configured and running on at least one node, the cluster disks will probably be corrupted.

To proceed, power off all nodes. Power up the shared storage devices and then power up node one.

About the Quorum Disk

The quorum disk is used to store cluster configuration database checkpoints and log files that help manage the cluster. We make the following quorum disk recommendations:

  • Create a small partition (minimum 50 MB) to be used as a quorum disk; we generally recommend a quorum disk of 500 MB.
  • Dedicate a separate disk for a quorum resource. As the failure of the quorum disk would cause the entire cluster to fail, we strongly recommend you use a volume on a RAID disk array.

During the Cluster service installation, you must provide the drive letter for the quorum disk. In our example, we use the letter Q.

Configuring Shared Disks

  1. Right-click My Computer, click Manage, and click Storage.
  2. Double-click Disk Management.
  3. Verify that all shared disks are formatted as NTFS and are designated as Basic. If you connect a new drive, the Write Signature and Upgrade Disk Wizard starts automatically. If this happens, click Next to go through the wizard. The wizard sets the disk to dynamic. To reset the disk to Basic, right-click Disk # (where # specifies the disk you are working with) and click Revert to Basic Disk.
  4. Right-click unallocated disk space:
    1. Click Create Partition…
    2. The Create Partition Wizard begins. Click Next twice.
    3. Enter the desired partition size in MB and click Next.
    4. Accept the default drive letter assignment by clicking Next.
    5. Click Next to format and create the partition.

Assigning Drive Letters

After the bus, disks, and partitions have been configured, drive letters must be assigned to each partition on each clustered disk.

Note: Mount points are a feature of the file system that allows you to mount a file system using an existing directory without assigning a drive letter. Mount points are not supported on clusters. Any external disk used as a cluster resource must be partitioned using NTFS partitions and must have a drive letter assigned to it.

  1. Right-click the desired partition and select Change Drive Letter and Path.
  2. Select a new drive letter.
  3. Repeat steps 1 and 2 for each shared disk.
    Figure 6: Disks with Drive Letters Assigned (drives.bmp)
  4. When finished, the Computer Management window should look like Figure 6 above. Now close the Computer Management window.

Verifying Disk Access and Functionality

  1. Click Start, click Programs, click Accessories, and click Notepad.
  2. Type some words into Notepad and use the File/Save As command to save it as a test file called test.txt. Close Notepad.
  3. Double-click the My Documents icon.
  4. Right-click test.txt and click Copy.
  5. Close the window.
  6. Double-click My Computer.
  7. Double-click a shared drive partition.
  8. Click Edit and click Paste.
  9. A copy of the file should now reside on the shared disk.
  10. Double-click test.txt to open it on the shared disk. Close the file.
  11. Highlight the file and press the Del key to delete it from the clustered disk.

Repeat the process for all clustered disks to verify they can be accessed from the first node.

At this time, shut down the first node, power on the second node and repeat the Verifying Disk Access and Functionality steps above. Repeat again for any additional nodes. When you have verified that all nodes can read and write from the disks, turn off all nodes except the first, and continue with this guide.

Install Cluster Service software

Configuring the First Node

Note: During installation of Cluster service on the first node, all other nodes must either be turned off, or stopped prior to Windows 2000 booting. All shared storage devices should be powered up.

In the first phase of installation, all initial cluster configuration information must be supplied so that the cluster can be created. This is accomplished using the Cluster Service Configuration Wizard.

  1. Click Start, click Settings, and click Control Panel.
  2. Double-click Add/Remove Programs.
  3. Double-click Add/Remove Windows Components.
  4. Select Cluster Service. Click Next.
  5. Cluster service files are located on the Windows 2000 Advanced Server or Windows 2000 Datacenter Server CD-ROM. Enter x:\i386 (where x is the drive letter of your CD-ROM). If Windows 2000 was installed from a network, enter the appropriate network path instead. (If the Windows 2000 Setup splash screen displays, close it.) Click OK.
  6. Click Next.
  7. The window shown in Figure 7 below appears. Click I Understand to accept the condition that Cluster service is supported on hardware from the Hardware Compatibility List only.
    Figure 7: Hardware Configuration Certification Screen (hcl.bmp)
  8. Because this is the first node in the cluster, you must create the cluster itself. Select The first node in the cluster, as shown in Figure 8 below, and then click Next.
    Figure 8: Create New Cluster (clustcreate.bmp)
  9. Enter a name for the cluster (up to 15 characters), and click Next. (In our example, we name the cluster MyCluster.)
  10. Type the user name of the cluster service account that was created during the pre-installation. (In our example, this user name is cluster.) Leave the password blank. Type the domain name, and click Next. Note: You would normally provide a secure password for this user account.

    At this point the Cluster Service Configuration Wizard validates the user account and password.

  11. Click Next.

Configuring Cluster Disks

Note: By default, all SCSI disks not residing on the same bus as the system disk will appear in the Managed Disks list. Therefore, if the node has multiple SCSI buses, some disks may be listed that are not to be used as shared storage (for example, an internal SCSI drive.) Such disks should be removed from the Managed Disks list.

  1. The Add or Remove Managed Disks dialog box shown in Figure 9 specifies which disks on the shared SCSI bus will be used by Cluster service. Add or remove disks as necessary and then click Next.
    Figure 9: Add or Remove Managed Disks (manageddisks.bmp)

    Note that because logical drives F: and G: exist on a single hard disk, they are seen by Cluster service as a single resource. The first partition of the first disk is selected as the quorum resource by default. Change this to denote the small partition that was created as the quorum disk (in our example, drive Q). Click Next.

    Note: In production clustering scenarios you must use more than one private network for cluster communication to avoid having a single point of failure. Cluster service can use private networks for cluster status signals and cluster management. This provides more security than using a public network for these roles. You can also use a public network for cluster management, or you can use a mixed network for both private and public communications. In any case, make sure at least two networks are used for cluster communication, as using a single network for node-to-node communication represents a potential single point of failure. We recommend that multiple networks be used, with at least one network configured as a private link between nodes and other connections through a public network. If you have more than one private network, make sure that each uses a different subnet, as Cluster service recognizes only one network interface per subnet.

    This document is built on the assumption that only two networks are in use. It shows you how to configure these networks as one mixed and one private network.

    The order in which the Cluster Service Configuration Wizard presents these networks may vary. In this example, the public network is presented first.

  2. Click Next in the Configuring Cluster Networks dialog box.
  3. Make sure that the network name and IP address correspond to the network interface for the public network.
  4. Check the box Enable this network for cluster use.
  5. Select the option All communications (mixed network) as shown in Figure 10 below.
  6. Click Next.
    Figure 10: Public Network Connection (pubclustnet.bmp)
  7. The next dialog box shown in Figure 11 configures the private network. Make sure that the network name and IP address correspond to the network interface used for the private network.
  8. Check the box Enable this network for cluster use.
  9. Select the option Internal cluster communications only.
    Figure 11: Private Network Connection (privclustnet.bmp)
  10. Click Next.
  11. In this example, both networks are configured so that both can be used for internal cluster communication. The next dialog window offers an option to modify the order in which the networks are used. Because Private Cluster Connection represents a direct connection between nodes, it is left at the top of the list. In normal operation this connection will be used for cluster communication. In case of a Private Cluster Connection failure, cluster service will automatically switch to the next network on the list, in this case Public Cluster Connection. Make sure the first connection in the list is the Private Cluster Connection and click Next. Important: Always set the order of the connections so that the Private Cluster Connection is first in the list.
  12. Enter the unique cluster IP address (172.16.12.20) and Subnet mask (255.255.252.0), and click Next.
    Figure 12: Cluster IP Address (clusterip.bmp)

    The Cluster Service Configuration Wizard shown in Figure 12 automatically associates the cluster IP address with one of the public or mixed networks. It uses the subnet mask to select the correct network.

  13. Click Finish to complete the cluster configuration on the first node. The Cluster Service Setup Wizard completes the setup process for the first node by copying the files needed to complete the installation of Cluster service. After the files are copied, the Cluster service registry entries are created, the log files on the quorum resource are created, and the Cluster service is started on the first node.

    A dialog box appears telling you that Cluster service has started successfully.

  14. Click OK.
  15. Close the Add/Remove Programs window.

Validating the Cluster Installation

Use the Cluster Administrator snap-in to validate the Cluster service installation on the first node.

  1. Click Start, click Programs, click Administrative Tools, and click Cluster Administrator.
    Figure 13: Cluster Administrator (1nodeadmin.bmp)

    If your snap-in window is similar to that shown above in Figure 13, your Cluster service was successfully installed on the first node. You are now ready to install Cluster service on the second node.

Configuring the Second Node

Note: For this section, leave node one and all shared disks powered on. Power up the second node.

Installing Cluster service on the second node requires less time than on the first node. Setup configures the Cluster service network settings on the second node based on the configuration of the first node.

Installation of Cluster service on the second node begins exactly as for the first node. During installation of the second node, the first node must be running.

Follow the same procedures used for installing Cluster service on the first node, with the following differences:

  1. In the Create or Join a Cluster dialog box, select The second or next node in the cluster, and click Next.
  2. Enter the cluster name that was previously created (in this example, MyCluster), and click Next.
  3. Leave Connect to cluster as unchecked. The Cluster Service Configuration Wizard will automatically supply the name of the user account selected during the installation of the first node. Always use the same account used when setting up the first cluster node.
  4. Enter the password for the account (if there is one) and click Next.
  5. At the next dialog box, click Finish to complete configuration.
  6. The Cluster service will start. Click OK.
  7. Close Add/Remove Programs.

If you are installing additional nodes, repeat these steps to install Cluster service on all other nodes.

Verify Installation

There are several ways to verify a successful installation of Cluster service. Here is a simple one:

  1. Click Start, click Programs, click Administrative Tools, and click Cluster Administrator.
    Figure 14: Cluster Resources (clustadmin.bmp)

    The presence of two nodes (HQ-RES-DC01 and HQ-RES-DC02 in Figure 14 above) shows that a cluster exists and is in operation.

  2. Right-click the group Disk Group 1 and select the option Move. The group and all its resources will be moved to another node. After a short period of time, the disks F: and G: will be brought online on the second node. If you watch the screen, you will see this shift. Close the Cluster Administrator snap-in.

Congratulations. You have completed the installation of Cluster service on all nodes. The server cluster is fully operational. You are now ready to install cluster resources like file shares, printer spoolers, cluster aware services like IIS, Message Queuing, Distributed Transaction Coordinator, DHCP, WINS, or cluster aware applications like Exchange or SQL Server.

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Clustering SQL Server 2005 (Installation)

 

When the installation process does begin, the setup program recognizes all the nodes, and once you give it the go-ahead to install on each one, it does, all automatically. SQL Server 2005 binaries are installed on the local drive of each node, and the system databases are stored on the shared array you designate.

In the next section are the step-by-step instructions for installing a SQL Server 2005 instance in a cluster. The assumption for this example is that you will be installing this instance in a 2-node active/passive cluster. Even if you will be installing a 2-node active/active or a multi-node cluster, the steps in this section are virtually the same. The only real difference is that you will have to run SQL Server 2005 setup for every instance you want to install on the cluster, and you will have to specify a different logical drive on the shared array.

Clustering SQL Server

To begin installing your SQL Server 2005 cluster, you will need the installation CD or DVD. You can either install it directly from the media, or copy the install files from the media to the current active node of the cluster, and run the setup program from there.

To begin the installation, run Setup.exe. After an introductory screen, you will get the first install dialog box as shown in the figure below.

The Installing Prerequisites dialog box lists the prerequisites that need to be installed before installation of SQL Server 2005 can begin. The number of components may vary from the above figure, depending on what you have already installed on your nodes. What is interesting to note here is that these prerequisite components will only be installed immediately on the active node. They will be installed on the passive node later during the installation process. This is done automatically and you don’t have to worry about it.

Click Install to install these components. When this is completed, you will get a dialog box telling you that they were installed successfully, and you can then click Next to proceed. On occasion, I have seen these components fail to install correctly. If this happens, you will have to troubleshoot the installation. Generally speaking, try rebooting both nodes of the cluster and installing the components again. This often fixes whatever caused the first setup attempt to fail.

Once the prerequisite components have been successfully installed, the SQL Server Installation Wizard launches, as you can see in the figure below.

Click Next to proceed.

The next step is for the SQL Server Installation Wizard to perform a System Configuration Check. This is very similar to the check that was performed with clustering services when you installed Windows 2003 Clustering. Ideally, you want all checks to be successful, with a green icon. If you get any yellow warning or red error icons, then you need to find out what the problem is, and correct it before proceeding.

In some cases, yellow warning icons can be ignored, but red error icons cannot. If you have any yellow or red icons, you may have to abort the setup process, fix the problem, then restart the setup process. Assuming all is well, click Next to proceed.

The next dialog box is Registration, where you enter your company name and license key, if applicable.

Next, you must select the SQL Server 2005 components to install. See below.

I want to point out the options to “Create a SQL Server failover cluster” and to “Create an Analysis Server failover cluster” (currently grayed out). Since we are creating a SQL Server 2005 cluster, you must select the “Create a SQL Server failover cluster.” If you are going to install Analysis Services (not covered in this example) then you must select “Create an Analysis Server failover cluster.” Once you have selected all the components you need to include, click Next.

As with any install of SQL Server 2005, the next step is to select the name of the instance to be installed. You can choose between a default instance and a named instance. Click Next to proceed.

Now, here is a very important step. This is when you enter the name of the virtual SQL Server 2005 instance you are currently installing. This is the name that clients will use to connect to this instance. Ideally, you have already selected a name to use that makes the most sense to your organization. Click Next to proceed. If you ever need to change this virtual name, you will have to uninstall and then reinstall SQL Server 2005 clustering.

This is also a very important step. This is where you enter the virtual IP address for this instance of SQL Server 2005. Like the cluster virtual name, it is used by clients to connect to this instance of SQL Server 2005. The IP address must belong to the same subnet as the IP addresses used by all of the nodes.

In addition, in this dialog box you must select the network to be used for the public network—the network used by the clients to connect to this instance of SQL Server 2005. All of the available networks will be listed in the drop-down box next to Network to use. If you have named the public and private networks Public and Private, respectively, it will be very easy for you to select the correct network, as I have above.

Once you have entered the IP address and selected the public network, click on Add, so that the information you just selected is in the Selected networks and IP addresses box. Then click Next.

In this dialog box, select the SQL Server Group as the group where you want to create the SQL Server resources. In addition, be sure that the Data files will be created on the correct logical drive of the shared array using the folder name you choose. Click Next to proceed.

Now, you specify which nodes you want to install this instance of SQL Server on. Because our example is for only two nodes, the default setting works for us. Notice that under Required node is SQL2005A, which is the name of the physical node where we are running the setup program. And under Selected nodes is SQL2005B, the second physical node in our 2-node cluster. Click Next to proceed.

In this dialog box, we must select an account (with password) that has administrative rights on all of the nodes where we want to install this instance of SQL Server 2005. This can be any domain account that is a local administrator of all the nodes. Click Next to proceed.

The Service Account dialog box is identical to the one you see when you install SQL Server 2005 on a non-cluster, and it is configured the same. Click Next to proceed.

In this dialog box, you must select pre-existing global domain groups that are used to contain the startup account for each clustered service. You can choose to add all three services to the same global domain group, or to create separate global domain groups, one for each service, as has been done above. Once you have selected appropriate domain groups, click Next to proceed.

The next four dialog boxes of the Installation Wizard, not shown here, are the same as for any other installation of SQL Server 2005. After you have completed these steps, the installation of this instance of SQL Server 2005 begins, and you see the following dialog box.

The installation process will take some time as it is installing the binaries on both nodes of the cluster, and installing the system data files on the shared array. The Setup Progress step shows the status of the first node’s install. If you want to see the status of the second node’s install, you can change the drop-down box next to Node to the second node and watch its progress.

As the installation proceeds, you will want to see all green icons next to each installation step. If any step should fail, then the entire installation process will need to be rolled back, any problems fixed, and SQL Server 2005 installed fresh. In most cases, cancelling a bad installation will uninstall what has already been installed, but not always.

Sometimes, if the installation breaks, it just dies and a rollback of what has been done so far will not occur. If this is the case you can either choose to reinstall on top of the existing bad install (which often does not work), manually uninstall the failed installation (check Microsoft’s Web site for assistance in this area), or rebuild your cluster from scratch (starting with the operating system).

If the install was a success, you will see a final dialog box, where you can click Finish. SQL Server 2005 clustering has now been successfully installed on the two cluster nodes.

Clustering Analysis Services

SQL Server 2005 Analysis Services can be clustered just like SQL Server 2005, and in fact, is installed using the same setup program used to install SQL Server 2005. Below are some points to keep in mind if you should decide to cluster SQL Server 2005 Analysis Services.

  • SQL Server 2005 Analysis Services can be installed by itself, or with SQL Server 2005. Because some of the features of Analysis Services require components of the SQL Server 2005 database engine, it is generally a good idea to install both of them in your cluster.
  • SQL Server 2005 Analysis Services is installed using the same setup program as SQL Server 2005. When running the setup program, you select (or do not select) Analysis Services in the “Components to Install” screen.
  • Because SQL Server 2005 Analysis Services needs to store program files, data files, and shared files, you will have to specify the location of your shared array, where they will reside. These files must reside on a shared array if you want Analysis Services to run after a failover. To specify the location of the shared array, you must select the “Advanced” button from the “Components to Install” screen in the setup wizard.

Other than the above, installing SQL Server 2005 Analysis Services in a cluster is virtually identical to installing SQL Server 2005 in a cluster.

Installing the Service Pack and Hot Fixes

Once you have installed SQL Server 2005 clustering, your next step is to install the latest SQL Server 2005 service pack and hot fixes, which can be downloaded from Microsoft’s Web site. Installing a service pack or hot fix is fairly straightforward because they are cluster-aware. Once the service pack or hot fix setup program is started, it detects that you have a cluster and will upgrade all nodes simultaneously. Once setup is complete, you may need to reboot your servers and failover the nodes. Generally, once I have run the service pack, I like to reboot the active node first. Once it has rebooted, then I reboot the passive node. This way, failover and failback is automatic.
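
After patching, you can confirm the build level from the clustered instance itself; the instance should report the service pack you just applied:

-- confirm version, service pack level, and edition after patching
SELECT  SERVERPROPERTY('ProductVersion')  AS ProductVersion,
        SERVERPROPERTY('ProductLevel')    AS ProductLevel,
        SERVERPROPERTY('Edition')         AS Edition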

Checking the SQL Server 2005 Installation From Cluster Administrator

Once an instance of SQL Server 2005 clustering is installed, you can view its cluster resources by going to Cluster Administrator and opening up the SQL Server Group resource, as shown below.

This figure shows the cluster resources for the SQL Server 2005 cluster we just built. We see all of the names of the resources, their state, and which node the resources are running on. As I have already mentioned, Cluster Administrator is a great tool for seeing if all the resources are up and running and which node is the current active node.
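
On SQL Server 2005 you can also list the cluster nodes straight from T-SQL, which is handy when Cluster Administrator is not open (requires VIEW SERVER STATE permission):

-- nodes on which this clustered instance can run
SELECT NodeName FROM sys.dm_os_cluster_nodes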

Here is a brief rundown on each of the SQL Server 2005 cluster resources:

  • Disk F: This is the shared disk array where the SQL Server data files and logs are stored.
  • SQL Network Name (sqlcluster): This is the virtual SQL Server name used by clients to connect to this clustered instance of SQL Server 2005. The name “sqlcluster” is the name I have assigned this cluster instance, and will not be the same as your cluster, unless you name yours the same as mine.
  • SQL IP Address (sqlcluster): This is the virtual SQL Server IP address used by clients to connect to this clustered instance of SQL Server 2005. Again, the name “sqlcluster” is the name of the virtual server, and is the one I have used for this cluster. Your name will most likely be different.
  • SQL Server: This is the SQL Server service.
  • SQL Server Agent: This is the SQL Server Agent service.
  • SQL Server FullText: This is the SQL Server FullText service. Even though you may not use this service, it is automatically installed as a cluster resource.

Manually Initiate a Failover in Cluster Administrator

This test is also performed from Cluster Administrator. Select any of the resources found in the SQL Server Group resource group (not the group itself), right-click it, and select Initiate Failure. Because the cluster service always tries up to three times to recover from a failure in place, if it can, you will have to select this option four times before a test failover is actually initiated.

As above, after the first failover, check for any problems, then failback using the same procedure. Then check again for problems.

Manually Failover Nodes by Turning Them Off

Turn off the active node. Once this happens, watch the failover in Cluster Administrator and from the clients, and, as before, check for any problems. Next, turn the node back on and wait until it boots up successfully. Then turn off the now-active node with a hard power-off. Again, watch the failover in Cluster Administrator and from the clients, and check for problems. Turn the node back on when done.

Manually Failover Nodes by Breaking the Public Network Connections

Unplug the public network connection from the active node. This will cause a failover to a passive node, which you can watch in Cluster Administrator and the clients. Check for any problems. Now, plug the public network connection back into the server. And unplug the public network connection from the now active node. This will cause a failover to the current passive node, which you can watch in Cluster Administrator. And again, watch the failover in Cluster Administrator and the clients, and check for problems. Once the testing is complete, plug the network connection back into the server.

Manually Failover Nodes by Breaking the Shared Array Connection

From the active node, remove the shared array connection. This will cause a failover, which you can watch in Cluster Administrator and the clients. Check for any problems. Next, reconnect the broken connection from the now active node, and remove the shared array connection. Watch the failover in Cluster Administrator. And again, watch the failover in Cluster Administrator and the clients, and check for problems. When done, reconnect the broken connection.

If you pass all of these tests the first time, it would almost be a miracle. But I do believe in miracles. If you run into problems, you have to figure them out.

Ready for Production

Once you have successfully tested your production SQL Server 2005 cluster, you are ready to go into production. If you have time, you might want to consider running the cluster in test mode for a while, “playing” with it to learn more about how it works under various conditions. But even if you don’t have any extra time to “play” with your cluster, it should be ready to go into production.

Disable and Enable Foreign Keys on all Tables in a database


------------------ One Method -------------------------------

Before dropping, run the script with CREATE in place of DROP and save the output, so that you can put the FKs back afterwards.

-- change the output to text mode

use [dbname]
go

SELECT 'alter table ' + user_name(uid) + '.' + object_name(parent_obj) + ' DROP CONSTRAINT ' + name, char(13) + char(10) + 'go'
from sysobjects
where xtype = 'F'
go
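
If you only need to disable constraint checking temporarily (for a bulk load, say) rather than drop and recreate the keys, a simpler sketch uses the undocumented sp_MSforeachtable helper. Note that re-enabling WITH CHECK revalidates every existing row, which can take a while on large tables:

-- disable all FK and CHECK constraints in the current database
EXEC sp_MSforeachtable 'ALTER TABLE ? NOCHECK CONSTRAINT ALL'

-- ... perform the bulk operation here ...

-- re-enable and revalidate all constraints
EXEC sp_MSforeachtable 'ALTER TABLE ? WITH CHECK CHECK CONSTRAINT ALL'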

_________________________________________________________________________________________

----------------------- Second Method --------------------------------

 

------1) Generate Add FK Statements.

SET NOCOUNT ON

-- Create table to hold the generated FK statements

IF OBJECT_ID('dbo.fkeys', 'U') IS NOT NULL
DROP TABLE dbo.fkeys
go
CREATE TABLE dbo.fkeys (c1 varchar(8000))
GO

------- Generate Adds for All Foreign Keys in Database

DECLARE @fkName varchar(800), @tabName varchar(800), @refName varchar(800)
DECLARE @isDel int, @isUpd int, @fkCol varchar(8000), @refCol varchar(8000)
DECLARE @pline varchar(8000), @fline varchar(8000)
set @fline = ''
DECLARE fkCursor CURSOR FOR
select distinct object_name(constid), object_name(fkeyid),
object_name(rkeyid),
OBJECTPROPERTY ( constid , 'CnstIsDeleteCascade' ),
OBJECTPROPERTY ( constid , 'CnstIsUpdateCascade' )
from sysforeignkeys k
order by object_name(fkeyid)

OPEN fkCursor

FETCH NEXT FROM fkCursor
INTO @fkName, @tabName, @refName, @isDel, @isUpd

WHILE @@FETCH_STATUS = 0
BEGIN
select @fkCol = NULL
SELECT @fkCol = ISNULL(@fkCol + ', ', '') + '[' + col_name(fkeyid, fkey) + ']'
from sysforeignkeys
where object_name(constid) = @fkName
order by keyno

select @refCol = NULL
SELECT @refCol = ISNULL(@refCol + ', ', '') + '[' + col_name(rkeyid, rkey) + ']'
from sysforeignkeys
where object_name(constid) = @fkName
order by keyno

select @pline = 'ALTER TABLE [dbo].[' + @tabName +
'] ADD CONSTRAINT [' + @fkName + ']' +
CHAR(13) + CHAR(10) +
' FOREIGN KEY (' + @fkCol + ') REFERENCES [dbo].[' + @refName +
'] (' + @refCol + ')'
if @isDel = 1
select @pline = @pline + CHAR(13) + CHAR(10) +
' ON DELETE CASCADE'
if @isUpd = 1
select @pline = @pline + CHAR(13) + CHAR(10) +
' ON UPDATE CASCADE'
select @pline = @pline + CHAR(13) + CHAR(10)
set @fline = @fline + @pline
INSERT INTO fkeys VALUES(@pline)

FETCH NEXT FROM fkCursor
INTO @fkName, @tabName, @refName, @isDel, @isUpd
END
CLOSE fkCursor
DEALLOCATE fkCursor

---------2) DROP FKs

DECLARE @LINE VARCHAR(MAX)
DECLARE fkCursor CURSOR FOR
select distinct 'ALTER TABLE [dbo].[' + object_name(fkeyid) +
'] DROP CONSTRAINT ' + object_name(constid) +
CHAR(13) + CHAR(10) + ' '
from sysforeignkeys

OPEN fkCursor

FETCH NEXT FROM fkCursor
INTO @LINE

WHILE @@FETCH_STATUS = 0
BEGIN
--PRINT @LINE
EXEC (@LINE)

FETCH NEXT FROM fkCursor
INTO @LINE
END

CLOSE fkCursor
DEALLOCATE fkCursor

------3) EXEC the generated ADD FK statements

DECLARE @sql varchar(8000), @stmts varchar(8000)
--set @sql = ''
DECLARE fkCursor CURSOR FOR
select * from fkeys

OPEN fkCursor

FETCH NEXT FROM fkCursor
INTO @stmts

WHILE @@FETCH_STATUS = 0
BEGIN
--print @stmts
exec (@stmts)
FETCH NEXT FROM fkCursor
INTO @stmts
END
CLOSE fkCursor
DEALLOCATE fkCursor
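
As a quick sanity check, count the foreign keys before running the DROP step and again after the ADD step; the two numbers should match:

-- one row per FK constraint; run before step 2 and after step 3
SELECT COUNT(DISTINCT constid) AS ForeignKeyCount FROM sysforeignkeys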

 

Executing SSIS Package as SQL Server JOB and Schedule


Execute an SSIS package through a SQL job

In SQL Server 2005, after you create an SSIS package (the successor to DTS), you will want to create a job and a schedule to run it. You may get an error that prevents you from running the job. What is the problem?

Here is why: SQL Server 2005 is quite different from SQL Server 2000. In SQL Server 2000 you can create the job and run it without problems; in SQL Server 2005 you must go through a security layer in order to run the job.

 

 

The logic is like this:

  • The job executor account needs the sysadmin server role plus the SQLAgentUserRole, SQLAgentReaderRole and SQLAgentOperatorRole roles in msdb.
  • The job needs to be run under a proxy account.
  • Ideally, the job executor account is also the account used to create the SSIS package, so you can be sure it has the rights to run the package.

 

The following steps can be followed to get the job done.

The work environment is MS SQL Server Management Studio and you log in as SA.

1. Create the job executor account

In Object Explorer, right-click Security -> New Login; for example, create a login named devlogin, type a password, and set the default database to your target database.

Server roles: check sysadmin.

User mapping: map the login to your target database.

msdb database: make sure to include SQLAgentUserRole, SQLAgentReaderRole and SQLAgentOperatorRole.

Then click OK.

 

2. Create a SQL proxy account and associate it with the job executor account

Here is the code; run it in a query window.

USE master
GO

CREATE CREDENTIAL [SQLCredential] WITH IDENTITY = 'yourdomain\myWindowAccount', SECRET = 'WindowLoginPassword'
GO

USE msdb
GO

EXEC sp_add_proxy @proxy_name='SQLProxy', @credential_name='SQLCredential'

EXEC sp_grant_login_to_proxy @login_name='devlogin', @proxy_name='SQLProxy'

EXEC sp_grant_proxy_to_subsystem @proxy_name='SQLProxy', @subsystem_name='SSIS'
GO
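
To confirm that the proxy exists and has been granted correctly, msdb's sp_help_proxy can be used as a quick check (not part of the original steps):

EXEC msdb.dbo.sp_help_proxy @proxy_name = 'SQLProxy'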

 

 

3.    Create SSIS package

In MS SQL Server Business Intelligence Development Studio, use the job executor account (devlogin above) to create the SSIS package, and make sure you can execute the package in Business Intelligence Development Studio. Then compile/build the package.

4. Create the job, schedule the job and run the job

In SQL Server Management Studio, make sure SQL Server Agent is started (right-click SQL Server Agent -> Start). Right-click Jobs -> New Job and name it, say, myJob.

Under Steps, New Step, name it, Step1,

Type: SQL Server Integration Services Package

Run as: SQLProxy

Package source: File System

Browse to select your package file xxx.dtsx

Click Ok

Schedule your job and enable it

Now you can run your job.

********************************************************************************************************

Backups and Recovery Models in SQL Server


Backups

Microsoft, in SQL Server Books Online, defines backups as:

A copy of data that is used to restore and recover data after a system failure

SQL Backups can be created a number of ways and can incorporate all or some of the data, as well as some part of the transaction log. While this article is focused on 2005 syntax, most of the concepts are applicable to 2000. This is a huge topic. At best, I’m going to scratch the surface and give you enough information so you won’t start crying again. After reading this, you should be able to set up a reasonable set of backups for your system.

Recovery Models

In order to begin working on backups, the business needs must first define the database recovery model. In essence, a recovery model defines what you're going to do with the transaction log data.

There are three recovery models: Full, Simple and Bulk Logged. These are pretty easy to define:

  • Simple: the transaction log is not backed up, so you can only recover to the most recent full or differential backup.
  • Full: you back up the database and the transaction log, so that you can recover the database to any point in time.
  • Bulk Logged: most transactions are stored in the transaction log, but some bulk operations, such as bulk loads or index creation, are only minimally logged.

The two most commonly used modes are Simple and Full. Don't assume that you always need Full recovery to protect your data; it is a business decision. The business is going to tell you whether you need to recover to a point in time, or whether you simply need the last full backup. It will also define whether your data is recoverable by other means, such as manual entry, or whether you have to protect as much as possible as it comes across the wire. You use Simple recovery if you can afford to lose the data stored since the last full or differential backup, and/or you just don't need recovery to a point in time. In Simple mode, you must restore all secondary read/write filegroups when you restore the primary. You use Simple mostly on secondary databases that are not an absolutely vital part of the enterprise, or on reporting systems with read-only access, where there isn't much of a transaction log to worry about anyway. You use Full if every bit of the data is vital, if you need to recover to a point in time or, usually in the case of very large databases (VLDBs), if you need to restore individual files and filegroups independently of other files and filegroups.

With both the Simple and Full recovery models, you can now run a Copy-Only backup, which allows you to copy the database to a backup file without affecting the log, the differential backup schedule, or recovery to a point in time. I'll try to drill down on as many of these topics as possible through the article, though not files and filegroups.
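
To see which recovery model each database on an instance is currently using, you can query sys.databases (SQL Server 2005 and later):

SELECT name, recovery_model_desc
FROM sys.databases
ORDER BY name;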

 

Working with Simple Recovery

Enough talk. Let’s get down to running backups. Let’s assume that we’re in Simple recovery on a small to mid-sized database. I’m going to use AdventureWorks for all the sample scripts. To set it to simple recovery:

ALTER DATABASE AdventureWorks SET RECOVERY SIMPLE

Your simplest backup strategy is to run, at regular intervals, the following SQL Server backup command, which will perform a full backup of the database:

BACKUP DATABASE AdventureWorks
TO DISK = 'C:\Backups\AdventureWorks.BAK'

 

What’s with all the typing you ask? Don’t we have GUI tools to handle the work for us? Yes, most simple backups can be performed using SQL Server Management Studio. However, if you want to learn and understand what Management Studio is doing for you, or if you want some fine grained control over what is backed up, how and where, then you’re going to have to break out the keyboard and put away the mouse.

The above command will precipitate a basic backup to disk. Most DBAs I know back up to file and then scrape the files onto a tape or some other media. This is because files on disk are simple and quick to recover, whereas media can sometimes be a bit of a pain. For example, we generally have two to three days' worth of backups on our file systems for immediate recovery. We only go to the tape systems if we need to run restores for older backups.

What did that command do? It made a copy of all the committed data in the database. It also copied uncommitted log entries. These are used during recovery to either commit or roll back changes that occurred to the data during the backup process.

Copy-only backups

Normally, backing up a database affects other backup and restore processes. For example after running the previous command, any differential backups (a backup that only copies data changed since the last backup) would be using this as the starting point for data changes, not the backup you ran last night. As noted earlier, SQL 2005 introduces a new concept to backups, COPY_ONLY backups, which allow us to keep from interrupting the cycle:

BACKUP DATABASE AdventureWorks
TO DISK = 'C:\Backups\AdventureWorks.bak'
WITH COPY_ONLY;

 

 

Already we've found one of those more granular moments where Management Studio wouldn't help you. If you want a copy-only backup, you have to use the command line.

Differential backups

Let’s assume for a moment, that we’re still in simple recovery, but we’re dealing with a larger database, say something above 100 GB in size. Full backups can actually start to slow down the process a bit. Instead, after consultation with the business, we’ve decided to do a weekly full backup and daily differential backups. Differential backups only backup the data pages that have changed since the last full backup. Following is the SQL backup command to perform a differential backup:

BACKUP DATABASE AdventureWorks
TO DISK = 'C:\backups\AdventureWorks.bak'
WITH DIFFERENTIAL

 

Now, if we had to restore this database, we'd first go to the last full backup, restore that, and then apply only the most recent differential backup, since differentials are cumulative (more on that later).

Each of the commands above appends its backup set to the named file. If instead you want to overwrite any existing backup sets in the file and start fresh, use the INIT option:

BACKUP DATABASE Adventureworks
TO DISK = 'C:\backups\AdventureWorks.bak'
WITH INIT

 

There are a number of other backup options that I won't be detailing here. See Books Online for details on BLOCKSIZE, EXPIREDATE, RETAINDAYS, PASSWORD, NAME, STATS, and so on.
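
For instance, a full backup that labels the backup set and reports progress every ten percent might look like this (the name and interval are illustrative):

BACKUP DATABASE AdventureWorks
TO DISK = 'C:\Backups\AdventureWorks.bak'
WITH NAME = 'AdventureWorks Nightly Full',
STATS = 10;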

You can also run a statement that will check the integrity of a database backup. It doesn’t check the integrity of the data within a backup, but it does verify that the backup is formatted correctly and accessible.

RESTORE VERIFYONLY
FROM DISK = 'C:\backups\Adventureworks.bak'

 

 

Full recovery and log backups

We’ve primarily been working on a database that was in Simple recovery mode (this used to be called Truncate Log on Checkpoint). In this mode, we do not backup the transaction logs for later recovery. Every backup under this mechanism is a database backup. Log backups are simply not possible.

However, you’ve only protected the data as of the last good backup, either full or differential. Let’s change our assumptions. Now we’re dealing with a large, mission critical application and database. We want to be able to recover this database up to the latest minute. This is a very important point. In theory, since the log entries are being stored and backed up, we’re protected up to the point of any failure. However, some failures can cause corruption of the log, making recovery to a point in time impossible. So, we have to determine what the reasonable minimum time between log backups will be. In this case we can live with no more than 15 minutes worth of lost data.

So, let’s start by putting our database in FULL recovery mode:

ALTER DATABASE AdventureWorks SET RECOVERY FULL

Then, on a scheduled basis, in this case every 15 minutes, we’ll run the SQL backup command for the transaction log:

BACKUP LOG Adventureworks
TO DISK = 'C:\backups\AdventureWorks_Log.bak';

This command will back up committed transactions from the transaction log. It has markers in the file that show the start and stop time. When it completes successfully, it will truncate the log, cleaning out of the transaction log the committed transactions that have been written to the backup file. If necessary, you can use the WITH NO_TRUNCATE option to capture data from the transaction log regardless of the state of the database, assuming it's online and not in an EMERGENCY status. That is for emergencies only.

Note that we are not using the INIT option in this case, but you can do so if you choose. When doing log backups, you've got options:

  • Run all the backups to a single file, where they'll stack, and all you have to do on restore (covered later) is cycle through them.
  • Name the backups uniquely, probably using date and time in the string.

In the latter case, safety says use INIT, because you're exercising maximum control over what gets backed up where, and you'll know exactly what a backup is, when it was taken and from where, based on the name. This is yet another place where operating backups from the command line gives you more control than the GUI. We've used both approaches in our systems for different reasons. You can decide what is best for your technology and business requirements.
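
A minimal sketch of the uniquely-named approach, building the file name from the current date and time (the path and naming convention are assumptions):

DECLARE @fileName varchar(260);
SET @fileName = 'C:\Backups\AdventureWorks_Log_'
    + CONVERT(varchar(8), GETDATE(), 112)                          -- yyyymmdd
    + '_' + REPLACE(CONVERT(varchar(8), GETDATE(), 108), ':', '')  -- hhmmss
    + '.bak';

BACKUP LOG AdventureWorks
TO DISK = @fileName
WITH INIT;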

Most of the options available to the database backup are included in Log backup, including COPY_ONLY. This would allow you to capture a set of transaction data without affecting the log or the next scheduled log backup. This would be handy for taking production data to another system for troubleshooting etc.

If you have your database set to FULL Recovery, you need to run log backups. Sometimes, people forget and the transaction log grows to the point that it fills up the disk drive. In this case, you can run:

BACKUP LOG Adventureworks WITH NO_LOG;

Attaching NO_LOG to the log backup, without specifying a location for the log, causes the inactive part of the log to be removed, and it does this without a log entry itself, thus working around the full disk drive. This is absolutely not recommended, because it breaks the log chain: the series of log backups from which you would recover your database to a point in time. Microsoft recommends running a full backup immediately after using this statement. Further, they warn that this statement may be deprecated in a future release.
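
Note too that truncating alone does not shrink the physical log file, so if the emergency was a full drive, a follow-up DBCC SHRINKFILE is usually needed. A sketch, where 'AdventureWorks_Log' is the assumed logical log file name and the target size is in MB:

USE AdventureWorks;
GO
-- shrink the log file down to roughly 100 MB
DBCC SHRINKFILE (AdventureWorks_Log, 100);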

 

 

Restoring Databases

As important as SQL Server backups are, and they are vital, they are useless without the ability to restore the database.

 

Restoring a full database backup

Restoring a full database backup is as simple as it was to create:

RESTORE DATABASE Adventureworks
FROM DISK = 'C:\Backup\AdventureWorks.bak';

It's really that simple, unless, as we have been doing, you are backing up everything to a single file as if it were a backup device. In that case, you'll need to specify which file within the "device" you're accessing. If you don't know which file, you'll need to generate a list:

RESTORE HEADERONLY
FROM DISK = 'C:\Backup\Adventureworks.bak';

This will list each of the backups stored within the file. So now, if we wanted to restore the second file in the group, the COPY_ONLY backup, we would issue the following command:

RESTORE DATABASE AdventureWorks
FROM DISK = 'C:\Backup\Adventureworks.bak'
WITH FILE = 2;

Unfortunately, if you’re following along, you may find that you just generated this error:

Msg 3159, Level 16, State 1, Line 1
The tail of the log for the database "AdventureWorks" has not been backed up.
Use BACKUP LOG WITH NORECOVERY to backup the log if it contains work you do
not want to lose. Use the WITH REPLACE or WITH STOPAT clause of the RESTORE
statement to just overwrite the contents of the log.
Msg 3013, Level 16, State 1, Line 1
RESTORE DATABASE is terminating abnormally.

What this means is that your database is in full recovery mode, but you haven't backed up the "tail of the log", meaning the transactions entered since the last time you ran a log backup. You can override this requirement if you change the previous syntax to:

RESTORE DATABASE AdventureWorks
FROM DISK = 'C:\Backups\Adventureworks.bak'
WITH FILE = 2,
REPLACE;

That's the first time we've stacked the WITH clauses (WITH FILE = 2 and WITH REPLACE combine as WITH FILE = 2, REPLACE), but it won't be the last. Read through Books Online; most of the WITH clause options can be used in combination with the others.

What happens if we want to restore to a different database than the original? For example, we want to make a copy of our database from a separate backup. Maybe we want to move it down to a production support server where we are going to do some work on it, separate from the production copy of the database. If we take the simple approach, well, try this:

RESTORE DATABASE AdventureWorks_2
FROM DISK = 'C:\Backups\Adventureworks.bak'
WITH FILE = 2;

In this case, you should see a whole series of errors relating to files not being overwritten. You really can create new databases from backups, but if you’re doing it on a server with the existing database, you’ll need to change the location of the physical files using the logical names. In order to know the logical names of the files for a given database, run this prior to attempting to move the files:

RESTORE FILELISTONLY
FROM DISK = 'C:\Backups\Adventureworks.bak'
WITH FILE = 2;

 

This can then be used to identify the appropriate logical names in order to generate this script:

RESTORE DATABASE AdventureWorks_2
FROM DISK = 'C:\Backups\Adventureworks.bak'
WITH FILE = 2,
   MOVE 'AdventureWorks_Data' TO 'C:\backups\aw2_data.mdf',
   MOVE 'AdventureWorks_Log' TO 'C:\backups\aw2_log.ldf';

 

 

Restoring a differential backup

The last method is to apply the differential backup. This requires two steps. First, we’ll restore the database, but with a twist and then we’ll apply the differential backup:

RESTORE DATABASE AdventureWorks
FROM DISK = 'C:\Backups\Adventureworks.bak'
WITH FILE = 1,
   NORECOVERY,
   REPLACE;

RESTORE DATABASE AdventureWorks
FROM DISK = 'C:\Backups\AdventureWorks.bak'
WITH FILE = 3;

Most of this is probably self-explanatory based on what we've already covered. The one wrinkle is the inclusion of the NORECOVERY keyword. Very simply, during the backup process, transactions may have started; some of them complete and some don't. At the end of a restore, completed transactions are rolled forward into the database and incomplete transactions are rolled back. Setting NORECOVERY skips that final step, leaving the incomplete transactions open and the database in a restoring state, which allows the next backup in the sequence to be applied.

We're mainly dealing with simple backups and restores in this article, but a more advanced restore in 2005 allows secondary filegroups to be restored while the database is online; the primary filegroup must be online during the operation. This will be most helpful for very large database systems.

 

 

Restoring SQL Server databases to a point in time

Restoring logs is not much more difficult than the differential database restore that we just completed. There’s just quite a bit more involved in restoring to a moment in time. Assuming you’re backing up your logs to a single file or device:

RESTORE HEADERONLY
FROM DISK = 'C:\Backups\Adventureworks_log.bak';

Otherwise, you simply go and get the file names you need. First run the database restore, taking care to leave it in a non-recovered state. Follow this up with a series of log restores to a point in time.

RESTORE DATABASE AdventureWorks FROM DISK = 'C:\Backups\Adventureworks.bak'
WITH FILE = 1,
   NORECOVERY,
   REPLACE,
   STOPAT = 'Oct 23, 2006 14:30:29.000';

 

RESTORE LOG AdventureWorks
FROM DISK = 'C:\Backups\Adventureworks_log.bak'
WITH FILE = 1,
   NORECOVERY,
   STOPAT = 'Oct 23, 2006 14:30:29.000';
RESTORE LOG AdventureWorks
FROM DISK = 'C:\Backups\Adventureworks_log.bak'
WITH FILE = 2,
   NORECOVERY,
   STOPAT = 'Oct 23, 2006 14:30:29.000';

 

RESTORE LOG AdventureWorks
FROM DISK = 'C:\Backups\Adventureworks_log.bak'
WITH FILE = 3,
   NORECOVERY,
   STOPAT = 'Oct 23, 2006 14:30:29.000';
RESTORE LOG AdventureWorks
FROM DISK = 'C:\Backups\Adventureworks_log.bak'
WITH FILE = 4,
   STOPAT = 'Oct 23, 2006 14:30:29.000';

Now what we have is a database that is up to the exact, last committed transaction at 14:30:29 on the 23rd of October. Remember, during multi-step restores such as this, you have to leave the database in a recovering status. That means appending NORECOVERY to each statement until you've completed the restore process. If, for some reason, you've added NORECOVERY to all your statements, or you simply stop in the middle and would like to bring the database back online, you can use this statement to complete the process:

RESTORE DATABASE Adventureworks
WITH RECOVERY;

 

Database snapshots

SQL Server 2005 introduced the concept of a snapshot, or a read-only, static view of a database. Snapshots are primarily created in order to supply a read-only version of a database for reporting purposes. However, they do function in a similar way to backups. The one primary difference is that all uncommitted transactions are rolled back. There is no option for rolling forward, capturing logs, etc., that backups provide, nor are very many SQL Server resources used at all. Rather, disk technology is used to create a copy of the data. Because of this they are much faster than backups both to create and restore.

 

A good use of snapshots, in addition to reporting, might be to create one prior to maintenance, after you've already removed all the active users (and their transactions) from the system. While snapshots don't support the volatility of live backups, their speed and ease of recovery make them a great tool for quick recovery from a botched rollout. Snapshots are stored on the server, so you must make sure you've got adequate storage.

The syntax is different because you’re not backing up a database; you’re creating a new one:

CREATE DATABASE Adventureworks_ss1430
ON (NAME = AdventureWorks_Data,
FILENAME = 'C:\Backups\AdventureWorks_data_1430.ss')
AS SNAPSHOT OF AdventureWorks;

Now it will be accessible for read-only access. Since we’re primarily concerned with using this as a backup mechanism, let’s include the method for reverting a database to a database snapshot.

First, identify the snapshot you wish to use. If there is more than one on any database that you’re going to revert, you’ll need to delete all except the one you are using:

DROP DATABASE Adventureworks_ss1440;

Then you can revert the database by running a RESTORE statement (mixed metaphors, not good):

RESTORE DATABASE Adventureworks
FROM DATABASE_SNAPSHOT = Adventureworks_ss1430;
 

That's it. On my system, creating the database snapshot of AdventureWorks took 136 ms, while the full backup took 5,670 ms. The restore of the snapshot took 905 ms and the database restore took 13,382 ms. Incorporating this into a production rollout process could result in significant benefits.

Again, it's worth noting that there are some caveats to using snapshots. You have to have enough disk space for a second copy of the database. You need to be careful working with snapshots, since most of the syntax is similar to that used by the databases themselves. Last, while there are snapshots attached to a database, you cannot run a restore from a database backup of that database.
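
To see which snapshots currently exist on an instance (and would therefore block a backup-based restore), you can query sys.databases; source_database_id is non-NULL only for snapshots:

SELECT name AS snapshot_name,
       DB_NAME(source_database_id) AS source_database
FROM sys.databases
WHERE source_database_id IS NOT NULL;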

SQL Server Database Backups


Database Backups:

 

Database backups are at the core of any SQL Server disaster recovery planning for any production system. Backups may be used to provide a means of recovery to a point-in-time when the database was last operational. Microsoft® SQL Server™ provides several types of backups that may be combined to formulate a customized disaster recovery plan depending on the nature of the data and the recovery requirements. It is highly recommended that all SQL Server databases be backed up periodically.

 

SQL Server backup media 

A database may be backed up to disk or to tape. The examples in this article assume a disk backup directly into a disk file (as opposed to a disk backup device). Any database can be backed up to a random disk file at any time. The file may either be initialized (using WITH INIT) or appended with the new backup.

 

Types of backups in SQL Server 

SQL Server provides several different kinds of backups including Complete, Differential, Transaction Log, and File(s) and Filegroup(s) backup.

A combination of these backups may be used to formulate a robust disaster recovery strategy. The following paragraphs explain each SQL Server backup type.

 

 

Complete database backup 

A complete database backup creates a stand-alone image of the entire database. A complete database backup is self-dependent and may be restored to either the same or a new database on the same or a different server. This provides plenty of flexibility at the time when this backup has to be restored. A complete backup may be restored without the need for any other kind of backup. It may also be performed for databases in any recovery model. Restoring a complete database backup typically would be considered a starting point for a disaster recovery situation where the entire database is lost or damaged. It is recommended that a complete database backup be performed at regular intervals for all production databases. It is also recommended that a complete backup should be performed for system databases if there are any changes performed to the SQL Server operating environment such as creating or removing databases, configuring security, creating and modifying DTS/SSIS packages or scheduled jobs, adding and removing linked servers, etc.

 

 Backup syntax 

BACKUP DATABASE Northwind
TO DISK = 'c:\backups\northwind.bak'
WITH INIT

Restore syntax (Same database)

RESTORE DATABASE Northwind
FROM DISK = 'c:\backups\northwind.bak'

 

Restore syntax (New database and/or server)

RESTORE DATABASE Northwind_new
FROM DISK = 'c:\backups\northwind.bak'
WITH MOVE 'northwind' TO 'c:\new_location\Northwind_new.mdf',
     MOVE 'northwind_log' TO 'c:\new_location\Northwind_new_log.ldf'

 

 

Differential database backup 

A differential backup backs up only modified extents since the last complete backup. An extent is a group of 8 data pages each consisting of 8 KB (64 KB in total). By definition, differential backups are cumulative. The most recent differential backup contains all changes from all previous differential backups performed since the most recent complete database backup. Differential backups may be considered as an alternative for databases that are large and are modified infrequently. These would include data warehouse type of databases. Differential backups have several limitations including the following:

  • They do not provide point-in-time restore capabilities
  • They may only be restored after a complete database backup is restored
  • They may not be performed on the master database

  

Backup syntax

BACKUP DATABASE Northwind
TO DISK = 'c:\backups\northwind_diff.bak'
WITH INIT, DIFFERENTIAL

 

Restore syntax (Same database – Note that a complete database backup is restored first using WITH NORECOVERY)

RESTORE DATABASE Northwind
FROM DISK = 'c:\backups\northwind.bak'
WITH NORECOVERY

RESTORE DATABASE Northwind
FROM DISK = 'c:\backups\northwind_diff.bak'
WITH RECOVERY

 

 

Transaction log backup 

An SQL Server database consists of two components: data file(s) and transaction log file(s). A transaction log captures the modifications made to the database. A simple transaction may place several records in the transaction log. Each of these records is known as a log record and is assigned a unique identification number known as the log sequence number (LSN). Log records that belong to the same transaction are linked together through the LSN. If SQL Server service shuts down unexpectedly, upon restart the recovery process examines the entries in the transaction log and if there are transactions that have not been rolled forward completely, the recovery process rolls back the changes performed as part of these incomplete transactions. This operation is extremely important as it forms the basis of transactional recovery. Entries in the transaction log are also used if transactional replication is configured for the specific database.

A transaction log backup backs up all transactions since either the previous transaction log backup, or the complete database backup if there have been no transaction log backups performed for the database in the past. This backup may then be used to apply the backed-up changes, in case disaster recovery is required. Transaction log backups may only be applied to a database in an unrecovered state. A database may be in an unrecovered state if it is being restored from a set of backups as part of a disaster recovery procedure, or if it is configured as a standby database on a warm backup server. A transaction log backup also truncates the inactive portion of the transaction log, unless the database is configured as a Publisher in transactional replication and there are transactions pending propagation to Subscribers.

Each transaction log backup contains a First and Last log sequence number (LSN). Consecutive transaction log backups should have sequential LSNs for the boundary log records. These LSN values may be examined using the RESTORE HEADERONLY command. If the LastLSN from the previously restored transaction log backup does not match the FirstLSN from the backup that is currently being restored, the restore operation fails with the following error: "This backup set cannot be restored because the database has not been rolled forward far enough. You must first restore all earlier logs before restoring this log". If the above message is generated while restoring a particular transaction log backup that is part of a set of transaction log backups to be restored, any attempts to restore further transaction log backups will fail with this message.
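
Besides RESTORE HEADERONLY, the LSN chain can also be checked from the backup history stored in msdb; a sketch (the database name is illustrative):

SELECT TOP 10
       database_name,
       type,               -- D = full, I = differential, L = log
       first_lsn,
       last_lsn,
       backup_finish_date
FROM msdb.dbo.backupset
WHERE database_name = 'Northwind'
  AND type = 'L'
ORDER BY backup_finish_date DESC;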

There could be several reasons for consecutive transaction log backups being out of sequence. Some of the most common reasons noted from support experience have been:

  • The database recovery model has been changed to Simple and back to either Full or Bulk-Logged. Switching the recovery mode to Simple causes the transaction log to be truncated
  • Another transaction log backup was performed between the previous successfully restored backup and the one generating this message
  • The transaction log was manually truncated between the two backups
  • The database was in Bulk-Logged recovery model and non-logged operations were performed
  • Transaction log backups are not allowed for databases in Simple Recovery model. While in Simple Recovery model, a database’s transaction log is truncated every time a CHECKPOINT is invoked for the database

Transaction log backups provide the possibility of performing a point-in-time restore or point-of-failure restore. You can also perform a restore to a named transaction with transaction log backups.
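
A sketch of a restore to a named transaction, assuming the transaction was marked when it originally ran (all names are illustrative):

-- at the time the transaction originally ran:
-- BEGIN TRANSACTION UpdPrices WITH MARK 'price update';
-- ... DML ...
-- COMMIT TRANSACTION UpdPrices;

RESTORE LOG Northwind
FROM DISK = 'c:\backups\northwind_log_1.bak'
WITH STOPATMARK = 'UpdPrices',
     RECOVERY;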

 

Backup syntax

BACKUP LOG Northwind
TO DISK = 'c:\backups\northwind_log_1.bak'
WITH INIT

 

Restore syntax (Same database – Note that a complete database backup is restored first using WITH NORECOVERY, then a sequence of transaction log backups)

RESTORE DATABASE Northwind
FROM DISK = 'c:\backups\northwind.bak'
WITH NORECOVERY

RESTORE LOG Northwind
FROM DISK = 'c:\backups\northwind_log_1.bak'
WITH RECOVERY

 

 

File(s) and Filegroup(s) backup 

Use BACKUP to back up database files and filegroups instead of the full database when time constraints make a full database backup impractical. To back up a file instead of the full database, put procedures in place to ensure that all files in the database are backed up regularly. Also, separate transaction log backups must be performed. After restoring a file backup, apply the transaction log to roll the file contents forward to make it consistent with the rest of the database.
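
No syntax is shown above for this backup type, so by way of illustration, a filegroup backup and the log backup that must accompany it might look like this (the database and filegroup names are hypothetical):

BACKUP DATABASE Northwind
FILEGROUP = 'PRIMARY'
TO DISK = 'c:\backups\northwind_fg_primary.bak'
WITH INIT

BACKUP LOG Northwind
TO DISK = 'c:\backups\northwind_log.bak'
WITH INIT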

Backup Plans and Strategy


Introduction

 

The intent of this document is to explore various backup scenario plans for SQL servers and come up with recommendations for successfully restoring them when the need arises. Database Administrators (DBAs) typically spend most of their time configuring and monitoring backup plans, but rarely take adequate time to test the restore path. DBAs should ask how confident they are that the backed-up data can be restored when the need arises. The typical problem DBAs face is that they have to handle an ocean of backup sets and don't know where to begin. More often than not, they spend hours trying to restore, but end up restarting with a different series of backup files. All this leads to more downtime and data loss, in spite of having access to successful backups.

Backup Strategy

 

Taking backups is just the starting point for securing databases. What is crucial is the ability to anticipate disasters that can occur in a typical corporate environment. This list includes, but is not limited to, catastrophic events, disk crashes, network malfunctions, malicious data modifications, database corruptions, accidental data updates and so on. Each time, the DBA might want to pick a different restore sequence best suited for limited or no data loss with minimal downtime.

Having all these disaster types in mind, it is always challenging for DBAs to architect the backup plans and scheduled times so that they have minimal impact on the production systems. In addition, backup plans need to accommodate future growth in the size of databases, as increased size requires increased backup time. It is unacceptable to have a backup plan in which, by the time last night's backup completes, the next day's backup starts; in such a scenario the system would always be in backup mode. In the subsequent paragraphs we will provide vital recommendations on backup plans, disaster simulations and restore paths for various situations and different types of databases, including system and user databases.

To choose the most optimal backup plan, one must answer the following questions:

1. How much data can we afford to lose when disaster occurs?
2. What is the acceptable downtime window for restore and recovery?
3. Is this a 24×7 database?
4. How large is the database and what type?

a. Is it a Static database? Loaded only once?
b. Is it a read-only database with periodic updates?
c. Is it a database that is frequently updated?
Here are some backup recommendations for SQL system databases:

Master Database:

The master database is essential for all Microsoft SQL Server operations. If the master database fails or becomes corrupt, it takes the whole server down with it. Unlike a critical user database that handles real-time customer transactions, the master database need not be backed up every hour or half-hour. One needs to back up the master database for the following reasons:

a. after it is created
b. if the configuration values are changed
c. if the SQL logons are configured
d. if any changes are made to the database
Model Database:

The Model database is a system database used as a template when creating new databases. There is no specific reason to back up the model database very often. Whenever a new object is created in model, or its configuration is changed, so that all new databases are created with those objects and that configuration, the database should be backed up. Making a complete database backup is, in most cases, good enough.

MSDB Database:

The job schedules and their history are maintained and stored in the MSDB database. Hence, whenever jobs are scheduled through the SQL Server Agent, MSDB should be backed up. Information about replication and log shipping is also kept in this database. Whenever changes related to any of the above items are made, the database should be backed up.

Distribution Database:

The Distribution database is available when replication is configured and the server is acting as a distributor. The recommendation is to schedule complete backups after snapshots. In the case of transactional replication, it is advisable to schedule regular log backups.

Tempdb

Normally, Tempdb need not (and in fact cannot) be backed up. This database is recreated each time SQL Server is started.

So far we have looked at backup recommendations for system databases. For user-created databases, let’s examine the following scenarios.

Scenario 1:

Consider a non 24×7 Database (about 500 MB to 1GB)
You should perform a complete database backup every night during the non-operational window and do transactional backups every hour. This approach provides both a point-in-time restore and a point-of-failure restore.

Scenario 2:

Consider a 24×7 Database (about 500 MB to 1GB)
You should perform a complete database backup every night during non-peak hours followed by a differential backup during off-peak time of the day. Transactional backups should be performed every 15 minutes, round the clock. This approach provides both a point-in-time restore and a point-of-failure restore. The restore time is also considerably less.

Scenario 3:

Consider a 24×7 Database (about 1 GB to 10GB)
You should perform a complete database backup every night. Do differential backups during off-peak time of the day. Do transactional backups every 15 minutes round the clock. Depending on the activity in the system, change transactional frequency.

Scenario 4:

Consider a 24×7 Database (about 10 GB to 60GB)
You should perform a complete database backup every night. Do differential backups during off-peak time of the day. Do transactional backups every 15 minutes round the clock. Depending on the activity in the system, change transactional frequency.

If the database grows beyond 60 GB, the time it takes to do a complete backup becomes the key factor. Taking individual file/file group backups is highly recommended thereafter. Today, sophisticated RAID disk systems are available and you should look at multiple files/file groups from a flexibility perspective rather than from a performance angle.

File/File Group backups give greater flexibility to handle very large databases for a number of reasons. Let’s take a look at some of them.

  • Recovery from isolated failures is faster. Only the damaged file needs to be restored rather than the entire database.
  • Enables greater flexibility to schedule backup jobs for very large databases, where a complete database backup is almost unmanageable.
  • One can organize file groups based on the updatability for better backup strategies. This enables frequent backups to frequently modified data files, rather than repetitive backup of static portions of the database.

You should be aware that these file/file group backups are not complete without transaction log backups. When restoring individual files, the transactions have to be rolled forward to make sure that the file or file group is consistent with the rest of the database. In order to reduce the number of transaction logs to be rolled forward, one should look into doing differential backups.

The primary disadvantage of the file and file group backups is the complexities involved in maintaining a complete set of backups to make sure that the restore yields a consistent database at the end.

Indexes and basic understanding


Index Structures

 

Indexes are created on columns in tables or views. The index provides a fast way to look up data based on the values within those columns. For example, if you create an index on the primary key and then search for a row of data based on one of the primary key values, SQL Server first finds that value in the index, and then uses the index to quickly locate the entire row of data. Without the index, a table scan would have to be performed in order to locate the row, which can have a significant effect on performance.

You can create indexes on most columns in a table or a view. The exceptions are primarily those columns configured with large object (LOB) data types, such as image, text, and varchar(max). You can also create indexes on XML columns, but those indexes are slightly different from the basic index and are beyond the scope of this article. Instead, I’ll focus on those indexes that are implemented most commonly in a SQL Server database.

An index is made up of a set of pages (index nodes) that are organized in a B-tree structure. This structure is hierarchical in nature, with the root node at the top of the hierarchy and the leaf nodes at the bottom, as shown in Figure 1.

 

Figure 1: B-tree structure of a SQL Server index

 

When a query is issued against an indexed column, the query engine starts at the root node and navigates down through the intermediate nodes, with each layer of the intermediate level more granular than the one above. The query engine continues down through the index nodes until it reaches the leaf node. For example, if you're searching for the value 123 in an indexed column, the query engine would first look in the root level to determine which page to reference in the top intermediate level. In this example, the first page points to values 1-100 and the second page to values 101-200, so the query engine would go to the second page on that level. The query engine would then determine that it must go to the third page at the next intermediate level. From there, the query engine would navigate to the leaf node for value 123. The leaf node will contain either the entire row of data or a pointer to that row, depending on whether the index is clustered or nonclustered.

Clustered Indexes

A clustered index stores the actual data rows at the leaf level of the index. Returning to the example above, that would mean that the entire row of data associated with the primary key value of 123 would be stored in that leaf node. An important characteristic of the clustered index is that the indexed values are sorted in either ascending or descending order. As a result, there can be only one clustered index on a table or view. In addition, data in a table is sorted only if a clustered index has been defined on a table.

Note: A table that has a clustered index is referred to as a clustered table. A table that has no clustered index is referred to as a heap.

Nonclustered Indexes

Unlike a clustered index, the leaf nodes of a nonclustered index contain only the values from the indexed columns and row locators that point to the actual data rows, rather than the data rows themselves. This means that the query engine must take an additional step in order to locate the actual data.

A row locator’s structure depends on whether it points to a clustered table or to a heap. If referencing a clustered table, the row locator points to the clustered index, using the value from the clustered index to navigate to the correct data row. If referencing a heap, the row locator points to the actual data row.

A nonclustered index does not sort the underlying data rows the way a clustered index does; however, you can create more than one nonclustered index per table or view. SQL Server 2005 supports up to 249 nonclustered indexes, and SQL Server 2008 supports up to 999. This certainly doesn't mean you should create that many indexes: indexes can both help and hinder performance.

In addition to being able to create multiple nonclustered indexes on a table or view, you can also add included columns to your index. This means that you can store at the leaf level not only the values from the indexed column, but also the values from non-indexed columns. This strategy allows you to get around some of the limitations on indexes. For example, you can include non-indexed columns in order to exceed the size limit of indexed columns (900 bytes in most cases).

Index Types

In addition to an index being clustered or nonclustered, it can be configured in other ways:

  • Composite index: An index that contains more than one column. In both SQL Server 2005 and 2008, you can include up to 16 columns in an index, as long as the index doesn’t exceed the 900-byte limit. Both clustered and nonclustered indexes can be composite indexes.
  • Unique Index: An index that ensures the uniqueness of each value in the indexed column. If the index is a composite, the uniqueness is enforced across the columns as a whole, not on the individual columns. For example, if you were to create an index on the FirstName and LastName columns in a table, the names together must be unique, but the individual names can be duplicated.

A unique index is automatically created when you define a primary key or unique constraint:

  • Primary key: When you define a primary key constraint on one or more columns, SQL Server automatically creates a unique, clustered index if a clustered index does not already exist on the table or view. However, you can override the default behavior and define a unique, nonclustered index on the primary key.
  • Unique: When you define a unique constraint, SQL Server automatically creates a unique, nonclustered index. You can specify that a unique clustered index be created if a clustered index does not already exist on the table.
  • Covering index: A type of index that includes all the columns that are needed to process a particular query. For example, your query might retrieve the FirstName and LastName columns from a table, based on a value in the ContactID column. You can create a covering index that includes all three columns (see the sketch after this list).
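
A minimal sketch of that covering index, using the column names from the example (the table and index names are hypothetical):

-- the ContactID key column satisfies the WHERE clause; FirstName and
-- LastName ride along at the leaf level, so the query can be answered
-- entirely from the index without touching the base table
CREATE NONCLUSTERED INDEX IX_Contact_Covering
ON dbo.Contact (ContactID)
INCLUDE (FirstName, LastName);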

Index Design

As beneficial as indexes can be, they must be designed carefully. Because they can take up significant disk space, you don’t want to implement more indexes than necessary. In addition, indexes are automatically updated when the data rows themselves are updated, which can lead to additional overhead and can affect performance. As a result, index design should take into account a number of considerations.

Database

As mentioned above, indexes can enhance performance because they can provide a quick way for the query engine to find data. However, you must also take into account whether and how much you’re going to be inserting, updating, and deleting data. When you modify data, the indexes must also be modified to reflect the changed data, which can significantly affect performance. You should consider the following guidelines when planning your indexing strategy:

  • For tables that are heavily updated, use as few columns as possible in the index, and don’t over-index the tables.
  • If a table contains a lot of data but data modifications are low, use as many indexes as necessary to improve query performance. However, use indexes judiciously on small tables because the query engine might take longer to navigate the index than to perform a table scan.
  • For clustered indexes, try to keep the length of the indexed columns as short as possible. Ideally, try to implement your clustered indexes on unique columns that do not permit null values. This is why the primary key is often used for the table’s clustered index, although query considerations should also be taken into account when determining which columns should participate in the clustered index.
  • The uniqueness of values in a column affects index performance. In general, the more duplicate values you have in a column, the more poorly the index performs. On the other hand, the more unique each value, the better the performance. When possible, implement unique indexes.
  • For composite indexes, take into consideration the order of the columns in the index definition. Columns that will be used in comparison expressions in the WHERE clause (such as WHERE FirstName = ‘Charlie’) should be listed first. Subsequent columns should be listed based on the uniqueness of their values, with the most unique listed first.
  • You can also index computed columns if they meet certain requirements. For example, the expression used to generate the values must be deterministic (which means it always returns the same result for a specified set of inputs). For more details about indexing computed columns, see the topic “Creating Indexes on Computed Columns” in SQL Server Books Online.

Queries

Another consideration when setting up indexes is how the database will be queried. As mentioned above, you must take into account the frequency of data modifications. In addition, you should consider the following guidelines:

  • Try to insert or modify as many rows as possible in a single statement, rather than using multiple queries.
  • Create nonclustered indexes on columns used frequently in your statement’s predicates and join conditions.
  • Consider indexing columns used in exact-match queries.

*********************************************************************************

SQL Server 2005 MOM – Monitoring


SQL Server 2005 Jobs

We use MOM 2005 to monitor our production environment.  We hadn’t been receiving any alerts from MOM when a SQL Server 2005 job failed, so I was tasked with tracking the problem down. 

I checked the MOM configuration to ensure everything was set up properly.  I compared the SQL Server 2005 rules to the SQL Server 2000 rules, since we were getting alerts when a SQL Server 2000 job failed.  The rules were configured the same. 

I then checked the Application Log for the 2005 job failures since that is what MOM uses.  I noticed that when a 2005 job failed, it wasn’t reported in the Application Log. 

In SQL Server 2000, by default, the “Write to Windows application event log” “When the job fails” option is checked in the “Notifications” tab when you create a job in Enterprise Manager. 
In SQL Server 2005, this option is not checked by default.

I modified one of our jobs, that I knew would fail, so that this option was checked.  I then ran the job.  MOM created an alert for the failed job.

We just took it for granted that this option was checked for us already in SQL Server 2005.  If you use MOM or any other software package that relies on the Event Log to find SQL Server 2005 issues, you will need to modify your jobs so that this option is checked. 

Since I'm a lazy and forgetful DBA, I am going to set up a job that will run either daily or weekly on each of our production SQL Servers and run the code below, so that the option is checked for all jobs.
Even though this is not a bug in SQL Server 2005, I consider this a serious problem for those of us using products such as MOM to monitor production. 

Here is the code to “fix” the existing jobs:

SET NOCOUNT ON

SELECT IDENTITY(int, 1, 1) AS agentJobId, name AS agentJobName
INTO #agentJob
FROM msdb.dbo.sysjobs
ORDER BY name

DECLARE @agentJobName sysname, @agentJobId int, @job_id uniqueidentifier

SET @agentJobId = 1

SELECT @agentJobName = agentJobName
FROM #agentJob
WHERE agentJobId = @agentJobId

WHILE @@ROWCOUNT <> 0
BEGIN
      EXEC msdb.dbo.sp_verify_job_identifiers
'@job_name', '@job_id', @agentJobName OUTPUT, @job_id OUTPUT

      EXEC msdb.dbo.sp_update_job @job_id, @notify_level_eventlog = 2

      SELECT @agentJobId = @agentJobId + 1, @job_id = NULL
 
      SELECT @agentJobName = agentJobName
      FROM #agentJob
      WHERE agentJobId = @agentJobId
END

DROP TABLE #agentJob
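
To see which jobs still lack the setting, before or after running the fix, a quick check against msdb (a sketch):

SELECT name, notify_level_eventlog   -- 2 = write to the event log when the job fails
FROM msdb.dbo.sysjobs
WHERE notify_level_eventlog <> 2;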

Database Migration process in SQL Server – Steps to understand


Pre-Migration Checklist

  1. Analyze the disk space of the target server for the new database; if the disk space is not enough, add more space on the target server
  2. Confirm the data and log file location for the target server
  3. Collect the information about the Database properties (Auto Stats, DB Owner, Recovery Model, Compatibility level, Trustworthy option etc)
  4. Collect the information of dependent applications, make sure application services will be stopped during the database migration
  5. Collect the information of database logins, users and their permissions. (Optional)
  6. Check the database for the Orphan users if any
  7. Check the SQL Server for any dependent objects (SQL Agent Jobs and Linked Servers)
  8. Check, if the database is part of any maintenance plan

 


 

Below are various scripts you can run to collect data.

Script to Check the Disk and Database Size

-- Procedure to check disc space
exec master..xp_fixeddrives
-- To Check database size
exec sp_helpdb [dbName]
or
use [dbName]
select str(sum(convert(dec(17,2),size)) / 128,10,2)  + 'MB'
from dbo.sysfiles
GO

 

Script to Check Database Properties

select
 sysDB.database_id,
 sysDB.Name as 'Database Name',
 syslogin.Name as 'DB Owner',
 sysDB.state_desc,
 sysDB.recovery_model_desc,
 sysDB.collation_name,
 sysDB.user_access_desc,
 sysDB.compatibility_level,
 sysDB.is_read_only,
 sysDB.is_auto_close_on,
 sysDB.is_auto_shrink_on,
 sysDB.is_auto_create_stats_on,
 sysDB.is_auto_update_stats_on,
 sysDB.is_fulltext_enabled,
 sysDB.is_trustworthy_on
from sys.databases sysDB
INNER JOIN sys.syslogins syslogin ON sysDB.owner_sid = syslogin.sid

Script to List Orphan Users

sp_change_users_login 'report'
GO 

 

 

Script to List Linked Servers

select * from sys.sysservers 

 

 

Script to List Database Dependent Jobs

select distinct name, database_name from msdb.dbo.sysjobs sj 
INNER JOIN msdb.dbo.sysjobsteps sjt on sj.job_id = sjt.job_id

 

__________________________________________________________

 

  

Database Migration Checklist

These are the steps you would go through to make the change.

1. Stop the application services

2. Change the database to read-only mode (Optional)       

-- Script to make the database read-only
USE [master]
GO
ALTER DATABASE [DBName] SET  READ_ONLY WITH NO_WAIT
GO
ALTER DATABASE [DBName] SET  READ_ONLY 
 

 

3. Take the latest backup of all the databases involved in migration

4. Restore the databases on the target server on the appropriate drives

5. Cross-check the database properties against the database property script output; change the database properties as per the pre-migration checklist

 

 

Script to Change DB Owner

This will change the database owner to “sa”.  This can be used to change to any owner you would like.

USE databaseName
EXEC sp_changedbowner 'sa'

 

Script to Turn on Trustworthy Option

If the trustworthy option was set on the source, this will turn it on for the migrated database.

ALTER DATABASE database_name SET TRUSTWORTHY ON

 

 

Script to Change the Database Compatibility Level

When you upgrade to a new version, the old compatibility level will remain.   

ALTER DATABASE DatabaseName
SET SINGLE_USER
GO
EXEC sp_dbcmptlevel DatabaseName, 90;
GO
ALTER DATABASE DatabaseName
SET MULTI_USER
GO

 

6. Execute the output of the login transfer script on the target server to create the logins there. You can get the code from this TechNet article: http://support.microsoft.com/kb/246133.

7. Fix Orphan Users

 

Script to Check and Fix Orphan Users

 

Script to check the orphan user

EXEC sp_change_users_login 'Report'
--Use below code to fix the Orphan User issue
DECLARE @username varchar(25)
DECLARE fixusers CURSOR
FOR
SELECT UserName = name FROM sysusers
WHERE issqluser = 1 and (sid is not null and sid <> 0x0)
and suser_sname(sid) is null
ORDER BY name
OPEN fixusers
FETCH NEXT FROM fixusers
INTO @username
WHILE @@FETCH_STATUS = 0
BEGIN
EXEC sp_change_users_login 'update_one', @username, @username
FETCH NEXT FROM fixusers
INTO @username
END
CLOSE fixusers
DEALLOCATE fixusers

 

8.  Execute DBCC UPDATEUSAGE on the restored database.

Run the DBCC UPDATEUSAGE command against the migrated database when upgrading to a newer version of SQL Server.

DBCC UPDATEUSAGE('database_name') WITH COUNT_ROWS
DBCC CHECKDB
OR
DBCC CHECKDB('database_name') WITH ALL_ERRORMSGS
 

9. Rebuild Indexes (Optional). Depending on the requirement and time window, you can execute this option.

Take a look at this tip to rebuild all indexes.

This will rebuild or reorganize all indexes for a particular table.

Index Rebuild: this process drops the existing index and recreates it.
Index Reorganize: this process physically reorganizes the leaf nodes of the index.

-- Script for Index Rebuild
USE [DBName];
GO
ALTER INDEX ALL ON [ObjectName] REBUILD
GO
-- Script for Index Reorganize
USE AdventureWorks;
GO
ALTER INDEX ALL ON [ObjectName] REORGANIZE
GO

 

10. Updating Index Statistics

                    EXEC sp_updatestats

11.   Recompile procedures

Take a look at this tip to recompile all objects.

This will recompile a particular stored procedure.

                   EXEC sp_recompile 'procedureName'

 

12. Start the application services, check the application functionality and check the Windows event logs.

13. Check the SQL Server Error Log for login failures and other errors

Take a look at this tip on how to read SQL Server Error Logs.

                   EXEC xp_readerrorlog 0, 1, N'Error', NULL

 

 14. Once the application team confirms that the application is running fine, take the databases offline on the source server or make them read-only

-- Script to make the database readonly
USE [master]
GO
ALTER DATABASE [DBName] SET  READ_ONLY WITH NO_WAIT
GO
ALTER DATABASE [DBName] SET  READ_ONLY
GO
-- Script to take the database offline
EXEC sp_dboption N'DBName', N'offline', N'true'
OR
ALTER DATABASE [DBName] SET OFFLINE WITH
ROLLBACK IMMEDIATE

 

Understanding SQL Server Migration & Upgrade


Database Migration / Upgrade in SQL Server:

 

Microsoft has made extensive enhancements to the SQL Server 2005 product suite. The product is introduced to the market not only as an RDBMS that meets new generation database needs, but also as an advanced BI technology product. Although DTS and SSIS are both ETL tools, their architectures diverge greatly. The advanced features introduced in the new product suite are:

  • Enterprise Performance
  • High Availability
  • Manageability
  • Security
  • Developer Productivity
  • Advanced Business Intelligence
  • Competitive features

This article explores the migration/upgrade paths available for companies wanting to move to SQL Server 2005 from SQL Server 2000 or SQL Server 7.0. It also discusses the new SQL Server 2005 Upgrade Advisor tool that Microsoft has developed for the upgrade.

Upgrade Mechanism

 

The basic difference between an upgrade and a migration is that an upgrade is an automated process in which a setup program moves an old instance of the database to a new instance, keeping the metadata (with some exceptions) and data the same. A migration is a manual process, in which the old and the new instances exist at the same time, which helps verify the correctness of the migration. Upgrading the database engine from an older version of SQL Server to SQL Server 2005 is done easily using the setup wizard; moving from Data Transformation Services (DTS) to SSIS requires a migration. The following table summarizes the upgrade path for each component of SQL Server 2005.

_______________________________________________________________________________________

SQL Server 2005 Component      Upgrade/Migration Path

Database Engine                Upgrade tool: Setup
                               Migration method: side-by-side installation, then database
                               backup/restore or detach/attach

Analysis Services              Upgrade tool: Setup
                               Migration tool: Migration Wizard (migrates objects; requires
                               optimization and client provider upgrades)

Integration Services           Upgrade tool: none
                               Migration tool: DTS Migration Wizard
                               Migration method: the wizard converts 50-70 percent of tasks;
                               some manual migration is required; runtime DTS DLLs remain
                               available in SSIS; package re-architecture is recommended

Reporting Services             Upgrade tool: Setup
                               Migration method: side-by-side installation, then deployment
                               of reports on the new instance

Notification Services          Upgrade tool: none
                               Migration tool: Notification Services instances are upgraded
                               during install
_______________________________________________________________________________________
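For the database engine, the side-by-side migration path in the table amounts to a backup on the old instance and a restore on the new one. A minimal sketch, assuming hypothetical database, logical file and path names:

-- On the old (source) instance
BACKUP DATABASE [AppDB] TO DISK = N'C:\Migration\AppDB.bak'

-- On the new SQL Server 2005 instance
RESTORE DATABASE [AppDB]
FROM DISK = N'C:\Migration\AppDB.bak'
WITH MOVE 'AppDB' TO N'D:\Data\AppDB.mdf',
     MOVE 'AppDB_log' TO N'D:\Data\AppDB_log.ldf'

-- Then raise the compatibility level (SQL Server 2005 syntax)
EXEC sp_dbcmptlevel 'AppDB', 90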

Understanding Database Snapshot


Database Snapshots are a new feature introduced in SQL Server 2005.

A snapshot provides an exact, read-only copy of a database as it existed at the moment the snapshot was taken.

Taking a snapshot is highly beneficial before making bulk changes to table data, or when you need to maintain an exact copy of your database as it stood at the end of each day.

Some points to be noted while working with database snapshots:

  • A database snapshot can be created using a T-SQL script only; it cannot be created through Management Studio.
  • A database snapshot must exist on the same SQL Server instance as the source database. Why this is so is beyond the scope of this article; I will explain it in another blog post.
  • Database snapshots cannot be backed up (although the Object Explorer menu still shows the backup options).
  • A database snapshot cannot be updated or modified (which is why it is referred to as a read-only copy).
  • The feature is available only in the Enterprise Edition of SQL Server 2005.

 

******* Database Snapshot ***************
Example:

/****** Create CyberArk database snapshots *******/
USE master
GO
CREATE DATABASE [QEPVBG-R] ON
( NAME = 'QEPVBG',  -- logical name of the source data file
  FILENAME = 'D:\MSSQL2K8R2\QEPVAIM-R_Snapshot\QEPVBG-R.ss'  -- new sparse file for the snapshot
)
AS SNAPSHOT OF [QEPVBG]
GO
CREATE DATABASE [QEPVAIM-R] ON
( NAME = 'QEPVAIM',  -- logical name of the source data file
  FILENAME = 'D:\MSSQL2K8R2\QEPVBG-R_Snapshot\QEPVAIM-R.ss'  -- new sparse file for the snapshot
)
AS SNAPSHOT OF [QEPVAIM]
GO

 

/****** Drop the snapshot databases. Note that if database snapshot creation fails,
the snapshot is left in SUSPECT status and should be dropped. *******/

USE master
GO
DROP DATABASE [QEPVAIM-R]
GO
DROP DATABASE [QEPVBG-R]
GO

-- You can even schedule a SQL Agent job if you need these snapshots created at specific time intervals.

Understanding SQL Server Database Snapshot


Database Snapshot in SQL Server 2005:

 

 

How does this new feature work?

 

The initial thought is that the snapshot needs to make a complete copy of your database so the data stays static. If this were the case, the snapshot would offer no advantage over a backup and restore.

Here’s what actually happens: When you create the snapshot, a shell of your database file is created. When data is read from the snapshot, it is actually being read from the primary database. As data changes in your primary database, SQL Server then writes out what the data looked like prior to the data change into a sparse file. The snapshot will then read the sparse file for the data that has been changed and continue to read the primary database for data that has not changed.

For example, suppose 90% of the data in the primary database has not changed: that 90% is still retrieved from the primary database when a query is issued, while the remaining 10% that has changed is read from the sparse file. (The original article illustrates this with a diagram from SQL Server 2005 Books Online.)
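You can verify which databases on an instance are snapshots, and what they are snapshots of, with a quick catalog query (SQL Server 2005 and later):

-- List database snapshots and their source databases
SELECT name AS SnapshotName,
       DB_NAME(source_database_id) AS SourceDatabase,
       create_date
FROM sys.databases
WHERE source_database_id IS NOT NULL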

As with any new tool or feature, there are always advantages and disadvantages. Here are a couple of key points.

 
What are the advantages?

 

  • Since SQL Server only needs to deal with the data that has changed for Database Snapshots to work, the initial creation of the snapshot or subsequent snapshots is extremely fast.
  • Data is only written to the sparse file as data changes, so your overall disk space needs don’t double each time you create a new snapshot — as they would if you were using backup and restore for the secondary copy.
  • You can create several snapshots for the same database, so you could have rolling snapshots occurring every hour for auditing purposes.
  • It is considerably easier to work with snapshots and much faster than backup and restore operations.
  • You can create a static reporting environment quickly with minimal effort.
  • Corrupt or deleted data can be retrieved from the snapshot to repair the primary database.
  • Database changes can be audited with the use of snapshots.
  • You have the ability to restore your database using a snapshot.

 
What are the disadvantages?

  • Your user community is still hitting your primary database. If you already experience performance issues because of reporting requirements, using snapshots for reporting will not resolve that problem.
  • The snapshot is tied to your primary database, so you cannot take the primary database offline. If the primary database goes offline, you won’t be able to access any of your snapshots.
  • The Database Snapshots feature is only available in the Enterprise Edition.
  • If data changes rapidly, this option may not offer benefits for keeping snapshots around for a long period of time.
  • Full-text indexing is not supported in the snapshot.

 

******************************************************************************************************

Creating and Restoring Database Snapshot:

 

The following guidelines show you how simple it is to create and use database snapshots.

 

 

Creating a Database Snapshot:

To create a database snapshot you must use a T-SQL command; managing snapshots is not currently supported through Management Studio. The syntax to create the snapshot is as follows:

CREATE DATABASE Northwind_Snapshot1000 ON (NAME = Northwind_Data, 
FILENAME = 'C:\MSSQL\Snapshot\Northwind_data1000.ss' ) 
AS SNAPSHOT OF Northwind

If you wanted to create a snapshot every hour, you could issue the above statement again, changing only the name of the snapshot database and its file. You would then have two snapshots, each holding different static data.

 CREATE DATABASE Northwind_Snapshot1100 ON (NAME = Northwind_Data, 
FILENAME = 'C:\MSSQL\Snapshot\Northwind_data1100.ss' ) 
AS SNAPSHOT OF Northwind

 

Usage of Snapshot:

Using the snapshot is just like using any other database. You attach to the database and execute your queries. The thing to remember though is that this is a read-only copy, so you can’t issue UPDATE, DELETE or INSERT statements.

Dropping:

To get rid of a database snapshot, drop the snapshot just like you would drop any other database.

 DROP DATABASE Northwind_Snapshot1000

Restoring:

One nice thing about the snapshots feature is that you can restore your entire database back to the point in time when the snapshot was taken. If there were issues with your database and you needed to revert back to the exact time the snapshot was taken, you could use a restore command. Another option is to use snapshots to restore a test environment back to its state when the testing began.

So you can take a snapshot, let your users or developers do their testing and then restore the snapshot so the database is back to the point in time prior to when testing began. To restore a database using a snapshot you can use the following command:

 RESTORE DATABASE Northwind FROM DATABASE_SNAPSHOT = 'Northwind_Snapshot1100'
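One caveat: reverting works only when the snapshot you restore from is the only remaining snapshot of the database, so drop any others first. Continuing the example:

-- Drop the older snapshot, then revert
DROP DATABASE Northwind_Snapshot1000
RESTORE DATABASE Northwind FROM DATABASE_SNAPSHOT = 'Northwind_Snapshot1100'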

 

 

Summary

As you can see, Database Snapshots are simple to implement and use. The first time I tried the feature, I was able to create a snapshot, run some queries and drop the snapshot within a couple of minutes. This is not one of those high-tech features that takes weeks to figure out, or even to decide whether you should be using it. If you haven't already tried creating and using database snapshots, I think you will be surprised by how easy it is.


 

Working with and Understanding How Access Is Granted to a User


Scenario:
I have been tasked with auditing security on SQL Server. I understand that logins allow you to connect to SQL Server, but I don't quite understand how to determine whether a login has access to a database or not. For instance, I know that all logins can access the master database, but when I look at a login in SQL Server Management Studio, I don't see a checkbox beside the master db for that login. How can I determine what databases a login has access to?

Process & Solution:
You're right: if you look in SQL Server Management Studio, you won't see a login's access to the master database unless the DBA has granted something explicitly. An example is shown in Figure 1.

Figure 1: Login with no explicit mapping to the master database (screenshot not reproduced here)

But we do know that all logins can access the master database. So let’s talk about how a login can connect to a given database. There are five possible ways a login can have permission to connect to a given database:

  1. Explicit access is granted.
  2. The login is a member of the sysadmin fixed server role.
  3. The login has CONTROL SERVER permissions (SQL Server 2005/2008 only).
  4. The login is the owner of the database.
  5. The guest user is enabled on the database.

 


 

Explicit Access (Login Mapped to Database User):

The first way is if a login is given explicit access to a database. For instance, in SQL Server 2000, if I had a login MyTestUser, I would grant access like so from within the database:

EXEC sp_grantdbaccess 'MyTestUser';
GO
 

In SQL Server 2005 and 2008 there are new T-SQL commands to create logins and users. So I would use the following command to do the same thing:

CREATE USER MyTestUser FOR LOGIN MyTestUser;
GO
 

A login granted access in this manner should appear in the sysusers table (SQL Server 2000) or the sys.database_principals catalog view (SQL Server 2005/2008).

For instance, here’s how I would match up users in a given database to their corresponding logins (SQL Server 2000):

SELECT sl.name AS 'Login', su.name AS 'User' 
FROM master..syslogins sl
  JOIN sysusers su
    ON sl.sid = su.sid
ORDER BY sl.name, su.name;
 

And here’s how we’d do it in SQL Server 2005/2008:

SELECT sp.name AS 'Login', dp.name AS 'User'
FROM sys.database_principals dp
  JOIN sys.server_principals sp
    ON dp.sid = sp.sid
ORDER BY sp.name, dp.name;

If you see a login match up to a user in this manner, then the login has access to the database.


 

Implicit Access (Member of Sysadmin Fixed Server Role):

All members of the sysadmin fixed server role map to the dbo user of every database. Therefore, if a login is a member of this role, it automatically has access to every database.

Here is the query for SQL 2000 to see members of the sysadmin fixed server role.

EXEC sp_helpsrvrolemember 'sysadmin'

 

Here is the query for SQL 2005/2008 to see members of the sysadmin fixed server role.

SELECT sp.name
FROM sys.server_role_members srm
INNER JOIN sys.server_principals sp
     ON srm.member_principal_id = sp.principal_id
WHERE srm.role_principal_id = (
     SELECT principal_id
     FROM sys.server_principals
     WHERE [Name] = 'sysadmin')
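A shorter equivalent uses the IS_SRVROLEMEMBER function:

SELECT name AS 'Login'
FROM sys.server_principals
WHERE type IN ('S', 'U', 'G')  -- SQL logins, Windows logins, Windows groups
  AND IS_SRVROLEMEMBER('sysadmin', name) = 1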


 

Implicit Access (CONTROL SERVER permission – SQL Server 2005/2008):

 

The CONTROL SERVER permission grants rights equivalent to membership in the sysadmin role, with a few exceptions that aren't important here. Therefore, if a login doesn't map explicitly to a user in a database but has CONTROL SERVER permissions, that login can still access the database. You can see who has CONTROL SERVER permissions with the following query:

SELECT sp.name 'Login' 
FROM sys.server_principals sp
   JOIN sys.server_permissions perms
     ON sp.principal_id = perms.grantee_principal_id
WHERE perms.type = 'CL'     
  AND perms.state = 'G';


 

Implicit Access (Database Owner):

 

The database owner automatically maps into the database as the dbo user. The query given under explicit access should reveal the owner by just looking at the dbo user. However, another way is to query the sysdatabases table (SQL Server 2000) or sys.databases catalog view (SQL Server 2005/2008). Here’s the SQL Server 2000 query that reveals all the owners of all the databases on the server:

SELECT db.name AS 'Database', sl.name AS 'Owner' 
FROM sysdatabases db
  INNER JOIN syslogins sl
    ON db.sid = sl.sid
ORDER BY db.name;

And here’s how to do the same thing in SQL Server 2005/2008:

 

SELECT db.name AS 'Database', sp.name AS 'Owner'
FROM sys.databases db
  INNER JOIN sys.server_principals sp
    ON db.owner_sid = sp.sid
ORDER BY db.name;


 

Implicit Access (Guest User Is Enabled):

 

The final way a login can get access to a database is if the guest user is enabled for that database. If a login cannot map in any other way, it will use guest if that is available. That is actually how logins can access the master database: the guest user is enabled there. With respect to user databases, the guest user should only be enabled in special cases; the default is for it to be disabled. However, there are two system databases in which the guest user must always remain enabled. They are:

  • master
  • tempdb

 

And that explains why logins always have access to master, even when explicit rights aren’t visible. To see if the guest user is enabled we can query sysusers (SQL Server 2000) or sys.database_permissions (SQL Server 2005/2008). Here’s how to do it in SQL Server 2000:

SELECT su.name, CASE su.hasdbaccess WHEN 1 THEN 'Yes' ELSE 'No' END AS 'Enabled'
FROM sysusers su
WHERE su.name = 'guest';

In SQL Server 2005/2008 we have to look for the existence of the CONNECT permission at the database level for the guest user. If it exists, the guest user is enabled. If it doesn’t, then the guest user is not.

SELECT dp.name, CASE perms.class WHEN 0 THEN 'Yes' ELSE 'No' END AS 'Enabled'
FROM sys.database_principals dp
  LEFT JOIN (SELECT grantee_principal_id, class FROM sys.database_permissions 
              WHERE class = 0 AND type = 'CO' AND state = 'G') AS perms
    ON dp.principal_id = perms.grantee_principal_id
WHERE dp.name = 'guest';
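If an audit shows the guest user enabled in a user database where it should not be, it can be toggled via the CONNECT permission in SQL Server 2005/2008:

-- Disable the guest user in the current user database (never in master or tempdb)
REVOKE CONNECT FROM guest
-- Re-enable it if genuinely required
GRANT CONNECT TO guest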
 

SQL Server Security – Understanding Roles


Fixed Database Roles:

 

Fixed database roles are defined at the database level and exist in
each database.

You cannot add, delete, or modify fixed database roles. You can only
add users as members of fixed database roles.

There are nine fixed database roles:

db_owner

db_accessadmin

db_datareader

db_datawriter

db_ddladmin

db_securityadmin

db_backupoperator

db_denydatareader

db_denydatawriter

The members of db_owner database role can perform any activity in the database.

The members of db_accessadmin database role can add or remove Windows NT groups, users or SQL Server users in the database.

The members of db_datareader database role can see any data from all user tables in the database.

The members of db_datawriter database role can add, change, or delete data from all user tables in the database.

The members of db_ddladmin database role can run any data definition language (DDL) command in the database.

The members of db_securityadmin database role can manage statement and object permissions in the database.

The members of db_backupoperator database role can back up the database.

The members of db_denydatareader database role are denied permission to select data in the database.

The members of db_denydatawriter database role are denied permission to change data in the database.

To add a security account as a member of an existing SQL Server database role in the current database, you can use sp_addrolemember system stored procedure.

This is the syntax:

sp_addrolemember [@rolename =] 'role',
    [@membername =] 'security_account'

where @rolename is the name of the database role and @membername is the name of the security account.

Any member of a fixed database role can add other users to this role.
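For example, to add the MyTestUser account mentioned earlier to the db_datareader role:

EXEC sp_addrolemember 'db_datareader', 'MyTestUser'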

_____________________________________________________________________________________

The benefits of using roles

 
Roles are a part of the tiered security model:

  • Login security—Connecting to the server
  • Database security—Getting access to the database
  • Database objects—Getting access to individual database objects and data

First, the user must log in to the server by entering a password. Once connected to the server, access to the stored databases is determined by user accounts. After gaining access to an actual database, the user is restricted to the data he or she can view and modify.

The main benefit of roles is efficient management. Imagine a group of 1,000 users suddenly needing to view or modify new data. Using Windows security, you simply select an existing Windows group and assign it to a SQL Server role—instead of modifying 1,000 user accounts.

To clarify, Windows groups consist of users with access to the Windows network, but SQL Server roles belong strictly to SQL Server. You're simply granting permissions to SQL Server data and objects to valid Windows users.

Role types

Server roles are maintained by the database administrator (DBA) and apply to the entire server, not an individual database file. The public role sets the basic default permissions for all users; every user added to SQL Server is automatically assigned to the public role, so you don't need to do anything. Database roles are applied to an individual database.

Predefined database roles

 
You may need to create your own, but you have access to several predefined database roles:

  • db_owner: Members have full access.
  • db_accessadmin: Members can manage Windows groups and SQL Server logins.
  • db_datareader: Members can read all data.
  • db_datawriter: Members can add, delete, or modify data in the tables.
  • db_ddladmin: Members can run data definition language (DDL) statements.
  • db_securityadmin: Members can modify role membership and manage permissions.
  • db_backupoperator: Members can back up the database.
  • db_denydatareader: Members can’t view data within the database.
  • db_denydatawriter: Members can’t change or delete data in tables or views.

 
Fixed SQL Server roles:

 
The fixed server roles are applied serverwide, and there are several predefined server roles:

  • SysAdmin: Any member can perform any action on the server.
  • ServerAdmin: Any member can set configuration options on the server.
  • SetupAdmin: Any member can manage linked servers and SQL Server startup options and tasks.
  • SecurityAdmin: Any member can manage server security.
  • ProcessAdmin: Any member can kill processes running on SQL Server.
  • DbCreator: Any member can create, alter, drop, and restore databases.
  • DiskAdmin: Any member can manage SQL Server disk files.
  • BulkAdmin: Any member can run the bulk insert command.
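Server role membership is managed with the sp_addsrvrolemember stored procedure. For example, to make a (hypothetical) Windows group a member of the DbCreator role:

EXEC sp_addsrvrolemember 'DOMAIN\SqlAdmins', 'dbcreator'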

 

******************************************************************************************************

Troubleshooting and working with Orphaned Users


Understanding Logins and Users:

Although the terms login and user are often used interchangeably, they are very different.

  • A login is used for user authentication
  • A database user account is used for database access and permissions validation.

Logins are associated to users by the security identifier (SID). A login is required for access to the SQL Server instance. The process of verifying that a particular login is valid is called "authentication". The login must be associated with a SQL Server database user, and you use the user account to control activities performed in the database. If no user account exists in a database for a specific login, the user that is using that login cannot access the database even though the user may be able to connect to SQL Server. The single exception to this situation is when the database contains the "guest" user account: a login that does not have an associated user account is mapped to the guest user. Conversely, if a database user exists but no login is associated with it, the user is not able to log in to the SQL Server instance.

When a database is restored to a different server it contains a set of users and permissions but there may not be any corresponding logins or the logins may not be associated with the same users. This condition is known as having “orphaned users.”

 

 

Troubleshooting and working with Orphaned Users

 

When you restore a database backup to another server, you may experience a problem with orphaned users. The following scenario illustrates the problem and shows how to resolve it.

USE master
EXEC sp_addlogin 'test', 'password', 'Northwind'

SELECT sid FROM dbo.sysxlogins WHERE name = 'test'
-- returns: 0xE5EFF2DB1688C246855B013148882E75

Grant database access to the login you just created:

USE Northwind
EXEC sp_grantdbaccess 'test'

SELECT sid FROM dbo.sysusers WHERE name = 'test'
-- returns: 0xE5EFF2DB1688C246855B013148882E75

As you can see, both SIDs are identical.

Back up the database:

USE master
BACKUP DATABASE Northwind
TO DISK = 'C:\Northwind.bak'

Copy the backup file to another machine running SQL Server and restore it as follows:

RESTORE FILELISTONLY
FROM DISK = 'C:\Users\Zahn\Work\Northwind.bak'
-- returns the logical file names: Northwind, Northwind_log

RESTORE DATABASE TestDB
FROM DISK = 'C:\Users\Zahn\Work\Northwind.bak'
WITH
 MOVE 'Northwind' TO 'D:\DataMSSQL\Data\northwnd.mdf',
 MOVE 'Northwind_log' TO 'D:\DataMSSQL\Data\northwnd.ldf'

The restored database contains a user named “test” without a corresponding login, which results in “test” being orphaned.

Check the SIDs:

USE master
SELECT sid FROM dbo.sysxlogins WHERE name = 'test'
-- returns: 0x39EE98D37EAC2243B7833705EC1C60E3

USE TestDB
SELECT sid FROM dbo.sysusers WHERE name = 'test'
-- returns: 0xE5EFF2DB1688C246855B013148882E75

Now, to detect orphaned users, run this code:

USE TestDB
EXEC sp_change_users_login 'report'
-- returns: test   0xE5EFF2DB1688C246855B013148882E75

The output lists all the users in the TestDB database whose entries in the sysusers system table have no matching entry in the sysxlogins system table in the master database.

Resolve Orphaned Users

USE TestDB
EXEC sp_change_users_login 'update_one', 'test', 'test'

SELECT sid FROM dbo.sysusers WHERE name = 'test'
-- returns: 0x39EE98D37EAC2243B7833705EC1C60E3

USE master
SELECT sid FROM dbo.sysxlogins WHERE name = 'test'
-- returns: 0x39EE98D37EAC2243B7833705EC1C60E3

This relinks the server login "test" with the TestDB database user "test". The sp_change_users_login stored procedure can also update all orphaned users at once with the "auto_fix" parameter, but this is not recommended, because SQL Server attempts to match logins and users by name. In most cases that works; however, if the wrong login is associated with a user, that user may end up with incorrect permissions.
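On SQL Server 2005 SP2 and later, the documented alternative is ALTER USER, which remaps the user to a specific login without relying on name matching:

-- Remap the database user 'test' to the server login 'test'
ALTER USER test WITH LOGIN = test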

Get failed JOBS from SQL Server Agent


 
CREATE VIEW dbo.View_FailedJobs

AS

SELECT  JJ.instance_id
  ,sj.job_id
  ,sj.name AS 'JOB_NAME'
  ,sjt.step_name AS 'STEP_NAME'
  ,JJ.run_status
  ,JJ.sql_message_id
  ,JJ.sql_severity
  ,JJ.message
  ,JJ.exec_date
  ,JJ.run_duration
  ,JJ.server
  ,sjt.output_file_name


FROM  

( SELECT  ssh.instance_id
     ,sjh.job_id
     ,sjh.step_id
     ,sjh.sql_message_id
     ,sjh.sql_severity
     ,sjh.message
     ,( CASE sjh.run_status
      WHEN 0 THEN 'Failed'
      WHEN 1 THEN 'Succeeded'
      WHEN 2 THEN 'Retry'
      WHEN 3 THEN 'Canceled'
      WHEN 4 THEN 'In progress'
     END) AS run_status
     -- build a readable datetime from the integer run_date (yyyymmdd) and run_time (hhmmss) columns
     ,((SUBSTRING(CAST(sjh.run_date AS VARCHAR(8)), 5, 2) + '/'
     + SUBSTRING(CAST(sjh.run_date AS VARCHAR(8)), 7, 2) + '/'
     + SUBSTRING(CAST(sjh.run_date AS VARCHAR(8)), 1, 4) + ' '
     + SUBSTRING((REPLICATE('0', 6 - LEN(CAST(sjh.run_time AS VARCHAR)))
     + CAST(sjh.run_time AS VARCHAR)), 1, 2) + ':'
     + SUBSTRING((REPLICATE('0', 6 - LEN(CAST(sjh.run_time AS VARCHAR)))
     + CAST(sjh.run_time AS VARCHAR)), 3, 2) + ':'
     + SUBSTRING((REPLICATE('0', 6 - LEN(CAST(sjh.run_time AS VARCHAR)))
     + CAST(sjh.run_time AS VARCHAR)), 5, 2))) AS 'exec_date'
     ,sjh.run_duration
     ,sjh.retries_attempted
     ,sjh.server
   FROM msdb.dbo.sysjobhistory sjh
   JOIN ( SELECT sjh.job_id
      ,sjh.step_id
      ,MAX(sjh.instance_id) as instance_id
     FROM msdb.dbo.sysjobhistory sjh
     GROUP BY sjh.job_id
       ,sjh.step_id
    ) AS ssh ON sjh.instance_id = ssh.instance_id
   WHERE sjh.run_status <> 1
  ) AS JJ
JOIN  msdb.dbo.sysjobs sj
  ON (jj.job_id = sj.job_id)
JOIN  msdb.dbo.sysjobsteps sjt
  ON (jj.job_id = sjt.job_id AND jj.step_id = sjt.step_id)
GO
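Once the view exists, pulling the most recent failures is a one-liner:

SELECT * FROM dbo.View_FailedJobs ORDER BY instance_id DESC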

 

Update Statistics for all the databases


USE master
GO

EXEC sp_MSforeachdb
' IF (''?'' NOT IN (''master'',''tempdb'',''model'',''msdb'',''ReportServerTempDB''))
BEGIN
PRINT ''Updating statistics in: ?''
USE [?]
EXEC sp_updatestats
END';
