
Get row counts for all tables in a database.


Execute the following Microsoft SQL Server T-SQL scripts in Management Studio Query Editor to list accurate and approximate (fast) rowcounts for all the tables in the database.

-- SQL Server count all rows in all tables - sql server rowcount all tables

DECLARE @CountStats TABLE(
  SchemaName SYSNAME,
  TableName  SYSNAME,
  RowsCount  INT
)

INSERT @CountStats
EXEC sp_msForEachTable
  'SELECT PARSENAME(''?'', 2), PARSENAME(''?'', 1), COUNT(*) FROM ?'

SELECT   *
FROM     @CountStats
ORDER BY RowsCount DESC

/* Partial results

SchemaName  TableName                     RowsCount
Sales       SalesOrderDetail              121317
Production  TransactionHistory            113443
Production  TransactionHistoryArchive     89253
Production  WorkOrder                     72591
*/
----------

-- APPROXIMATE - very quick
-- SQL count rows in all tables

USE AdventureWorks2008;

DECLARE @SQL NVARCHAR(255)
SET @SQL = 'DBCC UPDATEUSAGE (' + DB_NAME() + ')'
EXEC sp_executeSQL @SQL

SELECT Schema_name(t.schema_id) AS SchemaName,
       t.name                   AS TableName,
       i.rows                   AS [Rows]
FROM   sys.tables AS t
       INNER JOIN sys.sysindexes AS i
         ON t.object_id = i.id
        AND i.indid < 2
ORDER BY [Rows] DESC
GO

/* Partial results

SchemaName  TableName                     Rows
Sales       SalesOrderDetail              121317
Production  TransactionHistory            113443
Production  TransactionHistoryArchive     89253
Production  WorkOrder                     72591
Production  WorkOrderRouting              67131
*/
----------

-- SQL Server 2000 - approximate quick count

USE Northwind;

SELECT   TableName = o.name,
         [Rows] = replace(convert(VARCHAR,convert(MONEY,max(i.rows)),1), '.00','')
FROM     sysobjects o
         INNER JOIN sysindexes i
           ON o.id = i.id
WHERE    xtype = 'u'
         AND OBJECTPROPERTY(o.id,N'IsUserTable') = 1
GROUP BY o.name
ORDER BY max(i.rows) DESC
GO

/* Partial results

TableName               Rows
Order Details           2,155
Orders                  830
Customers               91
Products                77
Territories             53
EmployeeTerritories     49
*/
----------

-- ACCURATE COUNT with cursor
-- SQL Server find rows count in all tables and views - select count(*)

USE AdventureWorks2008;

DECLARE  @SchemaName SYSNAME,
         @TableName  SYSNAME,
         @TableType  VARCHAR(12)
DECLARE  @SQL NVARCHAR(MAX)

CREATE TABLE #Population (
  TableName    VARCHAR(256),
  TableType    VARCHAR(12),
  [Population] INT);

DECLARE curTablesAndViews CURSOR FAST_FORWARD FOR
  SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE
  FROM   INFORMATION_SCHEMA.TABLES

OPEN curTablesAndViews
FETCH NEXT FROM curTablesAndViews
INTO @SchemaName, @TableName, @TableType

WHILE (@@FETCH_STATUS = 0)
BEGIN
  SELECT @SQL = 'INSERT #Population SELECT ''' +
                @SchemaName + '.' + @TableName +
                ''',''' + @TableType + ''', COUNT(*) as Population ' +
                'FROM [' + @SchemaName + '].[' + @TableName + ']'
  PRINT @SQL -- debugging
  EXEC sp_executesql @SQL
  FETCH NEXT FROM curTablesAndViews
  INTO @SchemaName, @TableName, @TableType
END

CLOSE curTablesAndViews
DEALLOCATE curTablesAndViews

-- Return the list of row counts
SELECT   *
FROM     #Population
ORDER BY [Population] DESC
GO

DROP TABLE #Population

/*
TableName                     TableType   Population
.......
Person.BusinessEntityAddress  BASE TABLE  19614
Person.Address                BASE TABLE  19614
Sales.CreditCard              BASE TABLE  19118
Sales.PersonCreditCard        BASE TABLE  19118
Sales.vIndividualCustomer     VIEW        18508
.......
*/
----------

-- SQL Server 2005/2008 - approximate, very quick

DECLARE @SQL NVARCHAR(255)
SET @SQL = 'DBCC UPDATEUSAGE (' + DB_NAME() + ')'
EXEC sp_executeSQL @SQL

SELECT schema_name(o.schema_id)    AS SchemaName,
       object_name(o.object_id)    AS TableName,
       SUM(row_count)              AS [Count]
FROM     sys.dm_db_partition_stats ps
         INNER JOIN sys.objects o
           ON o.object_id = ps.object_id
WHERE    index_id < 2
         AND TYPE = 'U'
GROUP BY o.schema_id, o.object_id
ORDER BY [Count] DESC

/*
SchemaName  TableName                     Count
Production  TransactionHistory            14520704
Sales       SalesOrderDetail              121317
Production  TransactionHistoryArchive     89253
Production  WorkOrder                     72591
*/

----------

Backup Plan for huge databases


The following backup plan is recommended for very large Microsoft SQL Server databases with traditional business hours operation:

 

  • Full backup Saturday morning
  • Differential backup Mon-Thu nights
  • Transaction log backup hourly Mon-Fri 8am to 6pm

-- SQL full database backup

BACKUP DATABASE [AdventureWorks2008]
TO DISK = N'F:\data\AdventureWorks2008\backup\AW8.BAK'
WITH NOFORMAT, INIT,
NAME = N'AdventureWorks2008-Full Database Backup',
SKIP, NOREWIND, NOUNLOAD, STATS = 10
GO

-- SQL differential database backup

BACKUP DATABASE [AdventureWorks2008]
TO DISK = N'F:\data\AdventureWorks2008\backup\AW8D.BAK'
WITH DIFFERENTIAL, NOFORMAT, NOINIT,
NAME = N'AdventureWorks2008-Differential Database Backup',
SKIP, NOREWIND, NOUNLOAD, STATS = 10
GO

-- SQL transaction log backup

BACKUP LOG [AdventureWorks2008]
TO DISK = N'F:\data\AdventureWorks2008\backup\AW8T0900.BAK'
WITH NOFORMAT, NOINIT, NAME = N'AdventureWorks2008-Transaction Log Backup',
SKIP, NOREWIND, NOUNLOAD, STATS = 10
GO
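To recover under this plan, restore the most recent full backup first, then the latest differential, then the log backups taken since the differential. A minimal sketch, assuming the backup files created above; because the differential and log backups are appended (NOINIT), a WITH FILE = n clause may be needed to select the correct backup set within each file:

-- Restore the full backup, leaving the database ready for more restores
RESTORE DATABASE [AdventureWorks2008]
FROM DISK = N'F:\data\AdventureWorks2008\backup\AW8.BAK'
WITH NORECOVERY, STATS = 10
GO

-- Restore the latest differential backup
RESTORE DATABASE [AdventureWorks2008]
FROM DISK = N'F:\data\AdventureWorks2008\backup\AW8D.BAK'
WITH NORECOVERY, STATS = 10
GO

-- Restore the transaction log backups in order, recovering after the last one
RESTORE LOG [AdventureWorks2008]
FROM DISK = N'F:\data\AdventureWorks2008\backup\AW8T0900.BAK'
WITH RECOVERY, STATS = 10
GO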

----------

Identifying blocking processes


Execute the following Microsoft SQL Server T-SQL scripts in the application database:

USE AdventureWorks;
SELECT * FROM sys.sysprocesses WHERE blocked != 0
-- or
EXEC sp_who

Query and blocking information by spid:

-- SQL Query information for current processes

SELECT s.spid,
       BlockedBy = s.blocked,
       DBName = DB_NAME(s.dbid),
       ProgramName = s.program_name,
       LoginName = s.loginame,
       Query = CAST(TEXT AS VARCHAR(MAX))
FROM   sys.sysprocesses s
       CROSS APPLY sys.dm_exec_sql_text(sql_handle)
WHERE  s.spid > 50 -- Exclude system processes
ORDER BY DBName, ProgramName

/* Partial results

select @@spid;  select SERVERPROPERTY('ProductLevel');
USE AdventureWorks2008;    DECLARE @ParmDefinition NVARCHAR(1024)
*/

-- SQL find out source of blocking

SELECT SPID = p.spid,
       DBName = convert(CHAR(20),d.name),
       ProgramName = program_name,
       LoginName = convert(CHAR(20),l.name),
       HostName = convert(CHAR(20),hostname),
       Status = p.status,
       BlockedBy = p.blocked,
       LoginTime = login_time,
       QUERY = CAST(TEXT AS VARCHAR(MAX))
FROM   MASTER.dbo.sysprocesses p
       INNER JOIN MASTER.dbo.sysdatabases d
         ON p.dbid = d.dbid
       INNER JOIN MASTER.dbo.syslogins l
         ON p.sid = l.sid
       CROSS APPLY sys.dm_exec_sql_text(sql_handle)
WHERE  p.blocked = 0
       AND EXISTS (SELECT 1
                   FROM   MASTER..sysprocesses p1
                   WHERE  p1.blocked = p.spid)

/* Partial results

SPID  DBName              ProgramName                                     LoginName      HostName  Status    BlockedBy  LoginTime                QUERY
84    AdventureWorks2008  Microsoft SQL Server Management Studio - Query  SERVER\ksmith  DELLSTAR  sleeping  0          2010-03-29 00:33:17.890  BEGIN TRANSACTION insert test values (99)
*/
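Once the blocking spid is known (84 in the sample output above), its last batch can be inspected and, as a last resort, the session terminated; a minimal sketch:

-- Inspect the last batch sent by the blocking session
DBCC INPUTBUFFER(84);
GO

-- Last resort: terminate the blocking session (its open transaction is rolled back)
KILL 84;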

Performance improvement of a query using indexes.


If a Microsoft SQL Server 2005 database has two or more tables that are frequently joined together, then the columns used for the JOINs should have an index configured. If the columns used in the join are “wide”, for example Email varchar(70), then use a surrogate key for the JOINs such as

EmailID INT IDENTITY(1,1) PRIMARY KEY

INT is a fixed-length 4-byte column and yields extremely good performance in JOINs. Example index creation:

-- T-SQL create index - create nonclustered index on contactid column

CREATE NONCLUSTERED INDEX [idxContactPK] ON [Person].[Contact] ([ContactID] ASC)
GO
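A minimal sketch of the surrogate-key approach described above (the table and column names are hypothetical):

-- Hypothetical table: the narrow EmailID surrogate key stands in for the wide Email value
CREATE TABLE dbo.EmailDirectory (
  EmailID INT IDENTITY(1,1) PRIMARY KEY, -- 4-byte surrogate key used in JOINs
  Email   VARCHAR(70) NOT NULL           -- wide natural key, kept for lookups only
)
GO
-- Child tables store and join on the 4-byte EmailID instead of the 70-byte Email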

Regular index maintenance should include rebuilding indexes every weekend. Extremely dynamic OLTP tables should be rebuilt with FILLFACTOR 70-90.

-- MSSQL index rebuild with fillfactor

USE AdventureWorks;
GO

ALTER INDEX ALL ON Sales.SalesOrderDetail
REBUILD WITH (FILLFACTOR = 80, SORT_IN_TEMPDB = ON);
GO

Covering indexes are good for queries that are executed very frequently or are business or mission critical. SQL Server 2005 allows the inclusion of nonkey (not indexed) columns in a covering index; that is, all columns referenced in the query are included in the index as either key or nonkey columns. A composite index can also cover a query (pre-2005). This allows the query optimizer to locate all the required information from an index seek or scan; the table or clustered index data will not be accessed. Example: Category and Subcategory form a composite index key, and a frequent query references Category, Subcategory, Color and Size. Including Color and Size as nonkey columns in the index will speed up the query significantly.

-- SQL covering index demo - INCLUDE option in create index

USE AdventureWorks;
GO

IF EXISTS (SELECT name FROM sys.indexes
           WHERE name = N'idxContactCovering')
    DROP INDEX idxContactCovering ON Person.Contact;
GO

-- Create index covers all the columns in the following query
CREATE NONCLUSTERED INDEX idxContactCovering
    ON Person.Contact (ContactID)
    INCLUDE (FirstName, LastName, EmailAddress);
GO

-- SQL covered query
SELECT FirstName + ' ' + LastName AS FullName, EmailAddress, ContactID
FROM Person.Contact
WHERE ContactID BETWEEN 10000 AND 11000
ORDER BY FullName;
GO

/* Partial results

FullName          EmailAddress                        ContactID
Alexandra Allen   alexandra90@adventure-works.com     10115
Alexandra Hall    alexandra89@adventure-works.com     10058
Alexandra Smith   alexandra92@adventure-works.com     10494
Alexandra Young   alexandra91@adventure-works.com     10181
Allison Allen     allison45@adventure-works.com       10160
*/

Covering Indexes: Create and Execute in SQL Server


The Microsoft SQL Server T-SQL CREATE INDEX statement has the INCLUDE option (2005 and on) to include non-key column(s) in the index so that a specific query can be satisfied - covered - from the index without accessing table data. A composite index can also cover a query. A covering index speeds up the target query; however, due to its larger size, it may have a negative impact on other queries.

-- MSSQL query with execution timing envelope
-- DBCC DROPCLEANBUFFERS clears cached data pages so each test reads from disk
-- SELECT INTO table create
USE tempdb;
SELECT * INTO Address
FROM AdventureWorks2008.Person.Address

DBCC DROPCLEANBUFFERS
DECLARE @StartTime datetime = getdate();

-- TABLE SCAN (NonClustered)
-- COST 0.275
SET STATISTICS IO ON
SELECT a.PostalCode,
       a.City,
       a.StateProvinceID
FROM   dbo.Address a
WHERE  a.PostalCode LIKE '34%';
SET STATISTICS IO OFF

SELECT DurationMSEC = DATEDIFF(ms, @StartTime, getdate())
GO
-- logical reads 340
-- 76 msec


-- T-SQL create index with non-key columns INCLUDE
-- MSSQL SELECT will be covered for City & StateProvinceID (not for other columns)
CREATE INDEX idxPostalCodeCovering
ON dbo.Address (PostalCode)
INCLUDE (City, StateProvinceID);
GO
-- Command(s) completed successfully.


-- SQL Server time query again after index create
DBCC DROPCLEANBUFFERS
DECLARE @StartTime datetime = getdate();

-- INDEX SEEK (NonClustered)
-- COST 0.0034
SET STATISTICS IO ON
SELECT a.PostalCode,
       a.City,
       a.StateProvinceID
FROM   dbo.Address a
WHERE  a.PostalCode LIKE '34%';
SET STATISTICS IO OFF

SELECT DurationMSEC = DATEDIFF(ms, @StartTime, getdate())
GO
-- logical reads 4,
-- 10 msec


DROP INDEX dbo.Address.idxPostalCodeCovering
GO

-- T-SQL create composite index
-- MSSQL SELECT will be covered for City & StateProvinceID (not for other columns)
CREATE INDEX idxPostalCodeCoveringComposite
ON dbo.Address (PostalCode, City, StateProvinceID);
GO
-- Command(s) completed successfully.


-- SQL Server time query again after index create
DBCC DROPCLEANBUFFERS
DECLARE @StartTime datetime = getdate();

-- INDEX SEEK (NonClustered)
-- COST 0.0034
SET STATISTICS IO ON
SELECT a.PostalCode,
       a.City,
       a.StateProvinceID
FROM   dbo.Address a
WHERE  a.PostalCode LIKE '34%';
SET STATISTICS IO OFF

SELECT DurationMSEC = DATEDIFF(ms, @StartTime, getdate())
GO
-- logical reads 4,
-- 13 msec


-- Cleanup
DROP TABLE dbo.Address

Identify fragmentation for all the indexes in a database


USE AdventureWorks;

SELECT   'dbcc showcontig (' + convert(VARCHAR,i.[object_id]) + ',' + -- table id
         convert(VARCHAR,i.index_id) + ') -- ' +                      -- index id
         s.name +                                                     -- schema name
         '.' + object_name(i.[object_id]) +                           -- table name
         '.' + i.name                                                 -- index name
FROM     sys.schemas s
         JOIN sys.objects o
           ON (s.schema_id = o.schema_id)
         JOIN sys.indexes i
           ON (o.[object_id] = i.[object_id])
WHERE    o.TYPE = 'U'
         AND i.TYPE IN (1,2)
ORDER BY s.name,
         object_name(i.[object_id]),
         i.index_id
GO

/* Partial results

dbcc showcontig (642101328,1) -- Sales.SalesOrderDetail.PK_SalesOrderDetail_SalesOrderID_SalesOrderDetailID
dbcc showcontig (642101328,2) -- Sales.SalesOrderDetail.AK_SalesOrderDetail_rowguid
dbcc showcontig (642101328,3) -- Sales.SalesOrderDetail.IX_SalesOrderDetail_ProductID
dbcc showcontig (754101727,1) -- Sales.SalesOrderHeader.PK_SalesOrderHeader_SalesOrderID
dbcc showcontig (754101727,2) -- Sales.SalesOrderHeader.AK_SalesOrderHeader_rowguid
dbcc showcontig (754101727,3) -- Sales.SalesOrderHeader.AK_SalesOrderHeader_SalesOrderNumber
*/

Identify tables without a clustered index in a SQL Server database


-- SQL no clustered index - SQL tables missing clustered index - SQL objectproperty

USE AdventureWorksDW;

SELECT   DISTINCT [Table] = object_name(object_id)
FROM     sys.indexes
WHERE    index_id = 0
         AND objectproperty(object_id,'IsUserTable') = 1
         AND objectproperty(object_id,'IsMSShipped') = 0
ORDER BY [Table]
GO

/* Results

Table
AdventureWorksDWBuildVersion
DatabaseLog
FactCurrencyRate
FactFinance
FactInternetSales
FactInternetSalesReason
FactResellerSales
FactSalesQuota
ProspectiveBuyer
*/

Reindex all fragmented indexes in a database


USE AdventureWorks;
GO

-- Reindex database - reindex all tables script
-- Reindex fragmented indexes - rebuild all fragmented indexes
-- DBREINDEX database - REBUILD all database indexes

CREATE PROCEDURE uspDBREINDEXFragmentedTables @FragmentationLimit DECIMAL
AS
BEGIN
CREATE TABLE #IndexFragmentation (
  ObjectName     CHAR(255),
  ObjectId       INT,
  IndexName      CHAR(255),
  IndexId        INT,
  Lvl            INT,
  CountPages     INT,
  CountRows      INT,
  MinRecSize     INT,
  MaxRecSize     INT,
  AvgRecSize     INT,
  ForRecCount    INT,
  Extents        INT,
  ExtentSwitches INT,
  AvgFreeBytes   INT,
  AvgPageDensity INT,
  ScanDensity    DECIMAL,
  BestCount      INT,
  ActualCount    INT,
  LogicalFrag    DECIMAL,
  ExtentFrag     DECIMAL)

INSERT #IndexFragmentation
EXEC( 'DBCC SHOWCONTIG WITH TABLERESULTS, ALL_INDEXES')

DELETE #IndexFragmentation
WHERE  left(ObjectName,3) = 'sys'

UPDATE #IndexFragmentation
SET    ObjectName = s.name + '.' + o.name
FROM   #IndexFragmentation i
       JOIN sys.objects o
         ON i.ObjectID = o.object_id
       JOIN sys.schemas s
         ON o.schema_id = s.schema_id

DECLARE  @Table      SYSNAME,
         @DynamicSQL NVARCHAR(1024)
DECLARE  @objectid INT,
         @indexid  INT
DECLARE  @Fragmentation DECIMAL

DECLARE curIndexFrag CURSOR FOR
SELECT   ObjectName,
         LogicalFrag = max(LogicalFrag)
FROM     #IndexFragmentation
WHERE    LogicalFrag >= @FragmentationLimit
         AND IndexId != 0
         AND IndexId != 255
GROUP BY ObjectName

OPEN curIndexFrag
FETCH NEXT FROM curIndexFrag
INTO @Table,
     @Fragmentation

WHILE @@FETCH_STATUS = 0
  BEGIN
    SELECT @DynamicSQL = 'DBCC DBREINDEX (''' + RTRIM(@Table) + ''', '''', 70)'
    PRINT @DynamicSQL
    -- Partial result:
    -- DBCC DBREINDEX ('Sales.SpecialOfferProduct', '', 70)
    EXEC sp_executesql @DynamicSQL
    FETCH NEXT FROM curIndexFrag
    INTO @Table,
         @Fragmentation
  END

CLOSE curIndexFrag
DEALLOCATE curIndexFrag
DROP TABLE #IndexFragmentation
END
GO

-- Execute database reindexing (INDEX REBUILD) stored procedure
-- Reindex indexes with 35% logical fragmentation or higher
EXEC uspDBREINDEXFragmentedTables 35.0
GO
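DBCC SHOWCONTIG and DBCC DBREINDEX are deprecated from SQL Server 2005 onward; the detection step above can also be sketched with sys.dm_db_index_physical_stats. A minimal example (not the procedure above, just the fragmentation check):

-- List indexes in the current database above 35% logical fragmentation
SELECT object_name(ips.object_id)          AS TableName,
       i.name                              AS IndexName,
       ips.avg_fragmentation_in_percent
FROM   sys.dm_db_index_physical_stats(DB_ID(), NULL, NULL, NULL, 'LIMITED') ips
       JOIN sys.indexes i
         ON ips.object_id = i.object_id
        AND ips.index_id  = i.index_id
WHERE  ips.avg_fragmentation_in_percent >= 35.0
       AND ips.index_id > 0
ORDER BY ips.avg_fragmentation_in_percent DESC

Each row returned can then be rebuilt with ALTER INDEX ... REBUILD, as shown in the earlier fillfactor example.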

Disable and Create Triggers


----------
-- Disable all triggers and constraints in a database
----------
-- Disable trigger stored procedure - disable constraint stored procedure

USE CopyOfAdventureWorks;

-- SQL disable all triggers - disable all triggers sql server - t sql disable trigger
EXEC sp_MSforeachtable @command1='ALTER TABLE ? DISABLE TRIGGER ALL'
GO

-- SQL disable all constraints - disable all constraints sql server
EXEC sp_MSforeachtable @command1='ALTER TABLE ? NOCHECK CONSTRAINT ALL'
GO

-- Enable all triggers on a table
ALTER TABLE Production.Product ENABLE TRIGGER ALL

-- Enable all check constraints on a table
ALTER TABLE Production.Product CHECK CONSTRAINT ALL
GO

-- SQL enable all triggers - enable all triggers sql server - t sql enable trigger
EXEC sp_MSforeachtable @command1='ALTER TABLE ? ENABLE TRIGGER ALL'
GO

-- SQL enable all constraints - enable all constraints sql server
-- sp_MSforeachtable is an undocumented system stored procedure
EXEC sp_MSforeachtable @command1='ALTER TABLE ? CHECK CONSTRAINT ALL'
GO

----------
-- Single constraint disable and enable
----------

USE CopyOfAdventureWorks;

-- SQL disable constraint - alter table remove constraint
ALTER TABLE Production.Product NOCHECK CONSTRAINT CK_Product_DaysToManufacture
GO

-- SQL enable constraint
ALTER TABLE Production.Product CHECK CONSTRAINT CK_Product_DaysToManufacture
GO

-- SQL enable constraint with check of current data
ALTER TABLE Production.Product  WITH CHECK
                                CHECK CONSTRAINT CK_Product_DaysToManufacture
GO

-- SQL enable constraint with no check of current data
ALTER TABLE Production.Product WITH NOCHECK
CHECK CONSTRAINT CK_Product_DaysToManufacture

-- Check integrity of all constraints on a table
DBCC CHECKCONSTRAINTS('Production.Product');
GO

/* DBCC execution completed. If DBCC printed error messages,
   contact your system administrator.
*/

----------
----------
-- Single trigger disable and enable
----------

-- SQL disable trigger - alter table disable trigger
ALTER TABLE Sales.SalesOrderHeader DISABLE TRIGGER uSalesOrderHeader
GO

-- SQL enable trigger
ALTER TABLE Sales.SalesOrderHeader ENABLE TRIGGER uSalesOrderHeader
GO

----------
----------
-- SQL Server 2008 T-SQL insert, update, delete trigger demo
----------

USE tempdb;

CREATE TABLE TriggerDemo (
      ID int identity(1,1) PRIMARY KEY,
      TextData varchar(64)
)
GO

IF OBJECT_ID ('dbo.PrintTrigger','TR') IS NOT NULL
    DROP TRIGGER PrintTrigger;
GO

CREATE TRIGGER PrintTrigger
ON TriggerDemo
AFTER INSERT, UPDATE, DELETE
AS
BEGIN
    -- NOTE: the scalar subqueries assume single-row INSERT/UPDATE/DELETE statements
    DECLARE @InsertedMessage varchar(max) = 'New: ' + (SELECT TextData FROM inserted)
    DECLARE @DeletedMessage  varchar(max) = 'Old: ' + (SELECT TextData FROM deleted)
    PRINT @InsertedMessage
    PRINT @DeletedMessage
END
GO

INSERT TriggerDemo (TextData) VALUES ('Enable trigger demo')
GO
-- In messages: New: Enable trigger demo

UPDATE TriggerDemo SET TextData = 'Disable trigger demo'
GO
/*
New: Disable trigger demo
Old: Enable trigger demo
*/

DELETE TriggerDemo
GO
-- Old: Disable trigger demo

DROP TABLE TriggerDemo
GO

----------
----------
-- Create a check constraint for a table and enable it
----------
-- SQL create check constraint
-- Range constraint - column value must be between 0 and 100

USE AdventureWorks;

ALTER TABLE [Production].[ProductInventory]  WITH CHECK
ADD  CONSTRAINT [CK_ProductInventory_Bin]
CHECK  (([Bin]>=(0) AND [Bin]<=(100)))
GO

-- SQL enable check constraint
ALTER TABLE [Production].[ProductInventory]
CHECK CONSTRAINT [CK_ProductInventory_Bin]
GO

SQL Server Best Practices (T-SQL)


  • Always qualify objects by owner
  • Do not use * in SELECT statements; always specify columns
  • Query “with (nolock)” when you don’t require high transactional consistency
  • Do not use GOTO
  • Avoid cursor use. If necessary, always declare the correct type of cursor (FAST_FORWARD)
  • Avoid SELECT INTO for populating temp tables. Create the table, then use INSERT SELECT.
  • Always use ANSI join syntax
  • Always check for object existence
  • Use SCOPE_IDENTITY() instead of @@IDENTITY (see the sketch after this list)
  • Always check @@TRANCOUNT and commit/rollback as necessary
  • Order DML to avoid deadlocks
  • Always check @@ERROR and @@ROWCOUNT by assigning to a variable
  • Always check sp return values
  • Do not create cross database dependencies
  • Avoid table-valued UDFs - performance problems
  • Avoid dynamic SQL - if necessary, use sp_executesql over EXEC
  • Avoid using NULL values
  • When there are only two values, ISNULL is more efficient than COALESCE
  • Try to avoid “SELECT *”, except in these two cases: “WHERE EXISTS (SELECT * …)” and “SELECT COUNT(*) …”
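A minimal sketch of the SCOPE_IDENTITY() and @@ERROR/@@ROWCOUNT items above (the table is hypothetical):

-- Hypothetical table for illustration
CREATE TABLE dbo.Orders (OrderID INT IDENTITY(1,1) PRIMARY KEY, Amount MONEY)
GO

DECLARE @NewID INT, @Err INT, @Rows INT

INSERT dbo.Orders (Amount) VALUES (19.99)

-- Capture @@ERROR and @@ROWCOUNT immediately, in one statement,
-- before any other statement resets them
SELECT @Err = @@ERROR, @Rows = @@ROWCOUNT

-- SCOPE_IDENTITY() is safe even if a trigger inserts into another identity table;
-- @@IDENTITY would return the trigger's identity value instead
SET @NewID = SCOPE_IDENTITY()

IF @Err = 0 AND @Rows = 1
    PRINT 'Inserted OrderID ' + CAST(@NewID AS VARCHAR(12))
GO
DROP TABLE dbo.Orders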

Script to Backup all databases in SQL Server


Backup all the databases in SQL Server:

 

DECLARE @name VARCHAR(50)      -- database name
DECLARE @path VARCHAR(256)     -- path for backup files
DECLARE @fileName VARCHAR(256) -- filename for backup
DECLARE @fileDate VARCHAR(20)  -- used for file name

SET @path = 'C:\Backup\'

SELECT @fileDate = CONVERT(VARCHAR(20),GETDATE(),112)

DECLARE db_cursor CURSOR FOR
SELECT name
FROM master.dbo.sysdatabases
WHERE name NOT IN ('master','model','msdb','tempdb')

OPEN db_cursor
FETCH NEXT FROM db_cursor INTO @name

WHILE @@FETCH_STATUS = 0
BEGIN
       SET @fileName = @path + @name + '_' + @fileDate + '.BAK'
       BACKUP DATABASE @name TO DISK = @fileName

       FETCH NEXT FROM db_cursor INTO @name
END

CLOSE db_cursor
DEALLOCATE db_cursor

Transfer Logins from one server to another in SQL Server


Script to transfer logins from one server to another

   

Here’s the script I use to move logins, along with the script I use to resynch login IDs and database user IDs after moving the logins. I did not write either one, but I have made some modifications to both of them for my own purposes. I would give credit to the original authors of these scripts, but I don’t know who they are. This works fine in SQL Server 7.0 and 2000, but I have not tried it with 2005 yet.

Script to move login IDs from one server to another

USE database_name    -- Change to active database name
GO

IF OBJECT_ID ('sp_hexadecimal') IS NOT NULL
  DROP PROCEDURE sp_hexadecimal
GO

CREATE PROCEDURE sp_hexadecimal
    @binvalue varbinary(256),
    @hexvalue varchar(256) OUTPUT
AS
DECLARE @charvalue varchar(255)
DECLARE @i int
DECLARE @length int
DECLARE @hexstring char(16)
SELECT @charvalue = '0x'
SELECT @i = 1
SELECT @length = DATALENGTH (@binvalue)
SELECT @hexstring = '0123456789ABCDEF'
WHILE (@i <= @length)
BEGIN
  DECLARE @tempint int
  DECLARE @firstint int
  DECLARE @secondint int
  SELECT @tempint = CONVERT(int, SUBSTRING(@binvalue,@i,1))
  SELECT @firstint = FLOOR(@tempint/16)
  SELECT @secondint = @tempint - (@firstint*16)
  SELECT @charvalue = @charvalue +
    SUBSTRING(@hexstring, @firstint+1, 1) +
    SUBSTRING(@hexstring, @secondint+1, 1)
  SELECT @i = @i + 1
END
SELECT @hexvalue = @charvalue
GO

IF OBJECT_ID ('sp_help_revlogin') IS NOT NULL
  DROP PROCEDURE sp_help_revlogin
GO

CREATE PROCEDURE sp_help_revlogin @login_name sysname = NULL AS
DECLARE @name    sysname
DECLARE @xstatus int
DECLARE @binpwd  varbinary (255)
DECLARE @txtpwd  sysname
DECLARE @tmpstr  varchar (255)
DECLARE @logincount  int
SET @logincount = 0

IF (@login_name IS NULL)
  DECLARE login_curs CURSOR FOR
    SELECT name, xstatus, password FROM master..sysxlogins
    WHERE srvid IS NULL AND name <> 'sa'
ELSE
  DECLARE login_curs CURSOR FOR
    SELECT name, xstatus, password FROM master..sysxlogins
    WHERE srvid IS NULL AND name = @login_name
OPEN login_curs
FETCH NEXT FROM login_curs INTO @name, @xstatus, @binpwd
IF (@@fetch_status = -1)
BEGIN
  PRINT 'No login(s) found.'
  CLOSE login_curs
  DEALLOCATE login_curs
  RETURN -1
END
SET @tmpstr = '/* sp_help_revlogin script '
PRINT @tmpstr
SET @tmpstr = '** Generated '
  + CONVERT (varchar, GETDATE()) + ' on ' + @@SERVERNAME + ' */'
PRINT @tmpstr
PRINT ''
PRINT 'DECLARE @pwd sysname'
WHILE (@@fetch_status <> -1)
BEGIN
  IF (@@fetch_status <> -2)
  BEGIN
    PRINT ''
    SET @tmpstr = '-- Login: ' + @name
    PRINT @tmpstr
    IF (@xstatus & 4) = 4
    BEGIN -- NT authenticated account/group
      IF (@xstatus & 1) = 1
      BEGIN -- NT login is denied access
        SET @tmpstr = 'EXEC master..sp_denylogin ''' + @name + ''''
        PRINT @tmpstr
      END
      ELSE BEGIN -- NT login has access
        SET @tmpstr = 'EXEC master..sp_grantlogin ''' + @name + ''''
        PRINT @tmpstr
      END
    END
    ELSE BEGIN -- SQL Server authentication
      IF (@binpwd IS NOT NULL)
      BEGIN -- Non-null password
        EXEC sp_hexadecimal @binpwd, @txtpwd OUT
        IF (@xstatus & 2048) = 2048
          SET @tmpstr = 'SET @pwd = CONVERT (varchar, ' + @txtpwd + ')'
        ELSE
          SET @tmpstr = 'SET @pwd = CONVERT (varbinary, ' + @txtpwd + ')'
        PRINT @tmpstr
        SET @tmpstr = 'EXEC master..sp_addlogin ''' + @name
          + ''', @pwd, @encryptopt = '
      END
      ELSE BEGIN
        -- Null password
        SET @tmpstr = 'EXEC master..sp_addlogin ''' + @name
          + ''', NULL, @encryptopt = '
      END
      IF (@xstatus & 2048) = 2048
        -- login upgraded from 6.5
        SET @tmpstr = @tmpstr + '''skip_encryption_old'''
      ELSE
        SET @tmpstr = @tmpstr + '''skip_encryption'''
      PRINT @tmpstr
    END
  END
  FETCH NEXT FROM login_curs INTO @name, @xstatus, @binpwd
  SET @logincount = @logincount + 1
END
CLOSE login_curs
DEALLOCATE login_curs
PRINT ''
PRINT ''
PRINT '-- Total logins scripted = ' + CAST(@logincount AS varchar)
RETURN 0
GO

EXEC sp_help_revlogin
GO

IF OBJECT_ID ('sp_hexadecimal') IS NOT NULL
  DROP PROCEDURE sp_hexadecimal
GO

IF OBJECT_ID ('sp_help_revlogin') IS NOT NULL
  DROP PROCEDURE sp_help_revlogin
GO

Script to resync orphan SQL Server login IDs and database user IDs

USE database_name    -- Change to active database name
GO

DECLARE @UserName nvarchar(255)
DECLARE orphanuser_cur CURSOR FOR
 SELECT UserName = name
 FROM sysusers
 WHERE issqluser = 1 and (sid is not null and sid <> 0x0) and
 suser_sname(sid) is null
 ORDER BY name

OPEN orphanuser_cur
FETCH NEXT FROM orphanuser_cur INTO @UserName

WHILE (@@fetch_status = 0)
BEGIN
 PRINT @UserName + ' user name being resynced'

 EXEC sp_change_users_login 'Update_one', @UserName, @UserName

 FETCH NEXT FROM orphanuser_cur INTO @UserName
END

CLOSE orphanuser_cur
DEALLOCATE orphanuser_cur

SQL Server Table design and optimization process


Table design tips in SQL Server

  • Normalize your tables to the third normal form.
    A table is in third normal form (3NF) if it is in second normal form (2NF) and if it does not contain transitive dependencies. In most cases, you should normalize your tables to the third normal form. The normalization is used to reduce the total amount of redundant data in the database. The less data there is, the less work SQL Server has to perform, speeding its performance.

 

  • Consider the denormalization of your tables from the fourth or fifth normal forms to the third normal form.
    Normalization to the fourth and fifth normal forms can result in some performance degradation, especially when you need to perform many joins against several tables. It may be necessary to denormalize your tables to prevent performance degradation.

 

  • Consider horizontal partitioning of very large tables into current and archive versions.
    The less space used and the smaller the table, the less work SQL Server has to perform to evaluate your queries. For example, if you need to query only data for the current year in your daily work, and you need all the data only once per month for the monthly report, you can create two tables: one with the current year’s data and one with the old data.

 

  • Create the table’s columns as narrow as possible.
    This can reduce the table’s size and improve performance of your queries as well as some maintenance tasks (such as backup, restore and so on).

 

  • Try to reduce the number of columns in a table.
    The fewer the number of columns in a table, the less space the table will use, since more rows will fit on a single data page, and less I/O overhead will be required to access the table’s data.

 

  • Try to use constraints instead of triggers, rules, and defaults whenever possible.
    Constraints are much more efficient than triggers and can boost performance. Constraints are more consistent and reliable in comparison to triggers, rules and defaults, because you can make errors when you write your own code to perform the same actions as the constraints.

 

  • If you need to store integer data from 0 through 255, use tinyint data type.
    The columns with tinyint data type use only one byte to store their values, in comparison with the two bytes, four bytes and eight bytes used to store columns with smallint, int and bigint data types respectively. For example, if you design tables for a small company with 5-7 departments, you can create the departments table with the DepartmentID tinyint column to store the unique number of each department.

 

  • If you need to store integer data from -32,768 through 32,767, use smallint data type.
    The columns with smallint data type use only two bytes to store their values, in comparison with four bytes and eight bytes used to store the columns with int and bigint data types respectively. For example, if you design tables for a company with several hundred employees, you can create an employee table with the EmployeeID smallint column to store the unique number of each employee.

 

  • If you need to store integer data from -2,147,483,648 through 2,147,483,647, use int data type.
    The columns with int data type use only four bytes to store their values, in comparison with eight bytes used to store the columns with bigint data types. For example, to design tables for a library with more than 32,767 books, create a books table with a BookID int column to store the unique number of each book.

 

  • Use smallmoney data type instead of money data type, if you need to store monetary data values from -214,748.3648 through 214,748.3647.
    The columns with smallmoney data type use only four bytes to store their values, in comparison with eight bytes used to store the columns with money data types. For example, if you need to store the monthly employee payments, it might be possible to use a column with the smallmoney data type instead of money data type.

 

  • Use smalldatetime data type instead of datetime data type, if you need to store the date and time data from January 1, 1900 through June 6, 2079, with accuracy to the minute.
    The columns with smalldatetime data type use only four bytes to store their values, in comparison with the eight bytes used to store columns with datetime data types. For example, if you need to store an employee’s hire date, you can use a column with the smalldatetime data type instead of the datetime data type.

 

  • Use varchar/nvarchar columns instead of text/ntext columns whenever possible.
    Because SQL Server stores text/ntext columns on the Text/Image pages separately from the other data, stored on the Data pages, it can take more time to get the text/ntext values.

 

  • Use char/varchar columns instead of nchar/nvarchar if you do not need to store unicode data.
    A char/varchar value uses one byte to store one character, while an nchar/nvarchar value uses two bytes per character, so char/varchar columns use half the space of nchar/nvarchar columns.

 

  • Consider setting the ‘text in row’ SQL Server 2000 table’s option.
    The text, ntext, and image values are stored on the Text/Image pages, by default. This option specifies that small text, ntext, and image values will be placed on the Data pages with other data values in a data row. This can increase the speed of read and write operations and reduce the amount of space used to store small text, ntext, and image data values. You can set the ‘text in row’ table option by using the sp_tableoption stored procedure.

 

  • If you work with SQL Server 2000, use cascading referential integrity constraints instead of triggers whenever possible.
    For example, if you need to make cascading deletes or updates, specify the ON DELETE or ON UPDATE clause in the REFERENCES clause of the CREATE TABLE or ALTER TABLE statements, as sketched after this list. The cascading referential integrity constraints are much more efficient than triggers and can boost performance.
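A minimal sketch of a cascading referential integrity constraint (the table names are hypothetical):

-- Hypothetical parent/child tables: deleting or re-keying an order
-- cascades automatically to its detail rows, with no trigger needed
CREATE TABLE dbo.OrderHeader (
  OrderID INT PRIMARY KEY
)
CREATE TABLE dbo.OrderDetail (
  OrderDetailID INT PRIMARY KEY,
  OrderID INT NOT NULL
    REFERENCES dbo.OrderHeader (OrderID)
    ON DELETE CASCADE
    ON UPDATE CASCADE
)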

How to Optimize Database Replication in SQL Server


 

Optimization tips and tricks for SQL Server database replication

  • Avoid publishing unnecessary data.
    Try to restrict the amount of published data. This can result in good performance benefits, because SQL Server will publish only the amount of data required. This can reduce network traffic and boost the overall replication performance.

 

  • Place the published database log and distribution database log on separate disk drives.
    Because logging is more write-intensive, it is important that the disk arrays containing the SQL Server log files have sufficient disk I/O performance.

 

  • Do not configure the distribution database to expand or shrink automatically.
    Microsoft recommends setting a fixed size for the distribution database. Setting a database to grow automatically results in some performance degradation; therefore you should set a reasonable initial size for the distribution database.

 

  • Place the distribution component of replication on its own dedicated server.
    This topology is used for performance reasons when the level of replication activity increases or the server resources become constrained. It reduces Publisher loading, but it increases overall network traffic. This topology requires separate Microsoft SQL Server installations, one for the Publisher and one for the Distributor.

 

  • Run the Snapshot Agent as infrequently as possible.
    The Snapshot Agent bulk copies data from the Publisher to the Distributor, which results in some performance degradation. So, try to schedule it during CPU idle time and slow production periods.

 

  • Avoid using continuous replication.
    Try to schedule replication to occur at regular intervals instead of using continuous replication.

 

  • Avoid replicating text, ntext and image columns.
    These data types require more storage space and processing than other column data types.

 

  • Replicate the execution of stored procedures when a large number of rows are affected.
    For example, instead of replicating a very large number of insert, update and delete statements, you can create a stored procedure containing all of these statements and replicate only the execution of that stored procedure to the Subscriber. This can reduce network traffic and boost the overall replication performance.

 

  • Set the “Maximize Throughput for Network Applications” option.
    This can increase SQL Server performance, because Windows NT will allocate more RAM to SQL Server than to its file cache. To set this option, you can do the following:
    1. Double-click the Network icon in Control Panel.
    2. Click the Services tab.
    3. Click Server to select it, and then click the Properties button.
    4. Click Maximize Throughput for Network Applications, and then click OK.
    5. Restart the computer.

 

  • Specify the ‘min server memory’ option.
    This option is used to set a minimum amount of memory allocated to SQL Server. Microsoft recommends that the ‘min server memory’ option be set to at least 16 MB of memory to avoid low memory availability during replication activities, if the server is a remote Distributor or a combined Publisher and Distributor. You can also change this option when SQL Server works on the same computer as other applications. In this case, the ‘min server memory’ option is used to keep SQL Server working when other applications try to use all available memory.

 

  • If you work with SQL Server 2000 in a central publisher with remote distributor topology (when the distribution component of replication resides on its own dedicated server) and Publisher connected with the Distributor over slow LAN or WAN, consider compressing the snapshot files.
    This is a new SQL Server 2000 replication feature, which allows you to decrease network traffic by compressing snapshot files.

 

  • Try to enable pull or anonymous subscriptions to increase the Distributor performance.
    This can increase the Distributor performance, because Distribution Agent processing will be moved from the Distributor to Subscribers.

 

  • Increase the MaxBcpThreads property of the Snapshot Agent.
    This property specifies the number of bulk copy operations that can be performed in parallel. By increasing this value, bulk copy operations can run faster, because they will be performed in parallel. To increase the MaxBcpThreads value in the Snapshot Agent profile, you can do the following:
    1. Run SQL Server Enterprise Manager.
    2. Expand a server group; then expand a server.
    3. Expand Replication Monitor; then expand the Agents and click the Snapshot Agents folder.
    4. Right-click appropriate publication and select Agent Profiles…
    5. Click the New Profile button to create the new profile with the appropriate MaxBcpThreads value.
    6. Choose the newly created profile.
    Note. Do not set this property too high; it can result in some performance degradation, because SQL Server will have to spend extra time managing the extra threads. Increase this property to 2 and continue monitoring.

 

  • Set the OutputVerboseLevel property of the Distribution Agent, the Log Reader Agent, the Merge Agent, and the Snapshot Agent to 0.
    This property specifies whether the output should be verbose. There are three available values:
    0 – only error messages are printed
    1 – all of the progress report messages are printed
    2 – all error messages and progress report messages are printed
    The default value is 2. You can increase performance by printing only error messages.
    To set the OutputVerboseLevel value to 0, you can do the following:
    1. Run SQL Server Enterprise Manager.
    2. Expand a server group; then expand a server.
    3. Expand Replication Monitor; then expand the Agents and click the appropriate agent folder.
    4. Right-click appropriate publication and select Agent Properties…
    5. On the Steps tab, double-click the Run agent step, and then add the -OutputVerboseLevel 0 in the Command text box.

 

  • You can minimize the performance impact of history logging by selecting 1 for the HistoryVerboseLevel property of the Distribution Agent, the Log Reader Agent, the Merge Agent, and the Snapshot Agent.
    This property specifies the amount of history logged during distribution operation (for a Distribution Agent), during a log reader operation (for a Log Reader Agent), during a merge operation (for a Merge Agent), or during a snapshot operation (for a Snapshot Agent).
    To set the HistoryVerboseLevel value to 1, you can do the following:
    1. Run SQL Server Enterprise Manager.
    2. Expand a server group; then expand a server.
    3. Expand Replication Monitor; then expand the Agents and click the appropriate agent folder.
    4. Right-click appropriate publication and select Agent Properties…
    5. On the Steps tab, double-click the Run agent step, and then add the -HistoryVerboseLevel 1 in the Command text box.

 

  • If you work with SQL Server 2000, consider using the -UseInprocLoader agent property.
    If this option is set, the in-process BULK INSERT command will be used when applying snapshot files to the Subscriber. You cannot use this property with character mode bcp, and this property cannot be used by OLE DB or ODBC Subscribers.
    To set the UseInprocLoader property, you can do the following:
    1. Run SQL Server Enterprise Manager.
    2. Expand a server group; then expand a server.
    3. Expand Replication Monitor; then expand the Agents and click the Distribution Agents or Merge Agents folder.
    4. Right-click appropriate publication and select Agent Properties…
    5. On the Steps tab, double-click the subscription agent step, and then add the -UseInprocLoader property in the Command text box.

 

  • Increase the Log Reader Agent ReadBatchSize parameter.
    This parameter specifies the maximum number of transactions read out of the transaction log of the publishing database. The default value is 500. This option should be used when a large number of transactions are written to a publishing database, but only a small subset of those are marked for replication.

 

  • If you work with transactional replication, increase the Distribution Agent CommitBatchSize parameter.
    This parameter specifies the number of transactions to be issued to the Subscriber before a COMMIT statement is issued. The default value is 100.

 

  • Create an index on each of the columns used in the filter’s WHERE clause.
    If you do not index the columns used in filters, SQL Server must perform a table scan; see the sketch after this list.

 

  • If you work with merge replication, use static instead of dynamic filters.
    Because SQL Server requires more overhead to process the dynamic filters than static filters, for best performance you should use static filters, whenever possible.
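A minimal sketch of indexing a filter column, as referenced in the list above (the table and column are hypothetical):

-- Index the column referenced in the publication filter's WHERE clause
-- so evaluating the filter does not require a table scan
CREATE NONCLUSTERED INDEX idxCustomer_Region
ON dbo.Customer (Region)
GO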

How to Optimize Backup and Restore in SQL Server


 

Optimization rules for SQL Server backup and restore plans.

 

  • Try to perform the backup to a local hard disk first, and copy the backup file(s) to tape later.
    While a backup runs, some SQL Server commands cannot be executed: for example, you cannot run an ALTER DATABASE statement with either the ADD FILE or REMOVE FILE options, you cannot shrink the database, and you cannot run a CREATE INDEX statement. So, to decrease the backup operation’s time, you can perform the backup to a local hard disk first, and then copy the backup file(s) to tape, because tape devices are usually much slower than hard disks. The shorter the backup operation, the less impact there will be on the server when the backup occurs.

 

  • Perform the backup on multiple backup devices.
    Using multiple backup devices forces SQL Server to create a separate backup thread for each backup device, so the backups will be written to all backup devices in parallel; see the sketch after this list.

 

  • Perform the backup on a physical disk array: the more disks in the array, the more quickly the backup will be made.
    This can improve performance because a separate thread will be created for each backup device on each disk in order to write the backup’s data in parallel.

 

  • Perform backups during periods of low database access.
    Because backup is very resource intensive, try to schedule it during CPU idle time and slow production periods.

 

  • Use full backup to minimize the time to restore databases.
    The full backups take the longest to perform in comparison with differential and incremental backups, but are the fastest to restore.

 

  • Use incremental backup to minimize the time to back up databases.
    The incremental backups are the fastest to perform in comparison with full and differential backups, but the longest to restore.

 

  • Use differential backup instead of incremental backup when users update the same data many times.
    Because a differential backup captures only those data pages that have changed since the last full database backup, you can eliminate much of the time the server spends rolling transactions forward when recovering transaction logs from incremental backups. Using differential backup in this case can speed up the recovery process several times over.

 

  • Try to separate your database into different files and filegroups so that you can back up only the appropriate file/filegroup.
    This can result in a shorter backup operation. The shorter the backup operation, the less impact there will be on the server when the backup occurs.

 

  • Use Windows NT Performance Monitor or Windows 2000 System Monitor to check the backup’s impact on total system performance.
    You can check the following counters: SQL Server Backup Device: Device Throughput Bytes/sec to determine the throughput of specific backup devices, rather than the entire database backup or restore operation; SQL Server Databases: Backup/Restore Throughput/sec to monitor the throughput of the entire database backup or restore operation; PhysicalDisk: % Disk Time to monitor the percentage of time that the disk is busy with read/write activity; and Physical Disk Object: Avg. Disk Queue Length to determine how many system requests on average are waiting for disk access.

 

  • To decrease the backup operation’s time, consider backing up more often.
    The more often you back up, the smaller the backups will be, and the less impact there will be on the server when each backup occurs. So, to avoid locking users out for a long time during everyday work, you can perform backups more often.
    Note. The more often you back up, the less data you will lose if the database becomes corrupt.

 

  • Place a tape drive on a separate SCSI bus from disks or a CD-ROM drive.
    Tape drives perform better if they have a dedicated SCSI bus for each tape drive used. Using a separate SCSI bus for a tape drive can result in maximum backup performance and prevents conflicts with other drive array access. Microsoft recommends using a dedicated SCSI bus for tape drives whose native transfer rate exceeds 50 percent of the SCSI bus speed.

 

  • Use SQL Server 2000 snapshot backups for very large databases.
    The SQL Server 2000 snapshot backup and restore technologies work in conjunction with third party hardware and software vendors. The main advantages of snapshot backups and restores are that they can be done in a very short time, typically measured in seconds, not hours, and they reduce the backup/restore impact on overall server performance. Snapshot backups are accomplished by splitting a mirrored set of disks or creating a copy of a disk block when it is written, and they require special hardware and software.
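A minimal sketch of the multiple-device (striped) backup mentioned in the list above; the file paths are hypothetical, and all stripes are needed to restore:

-- Striped backup: SQL Server writes to all listed devices in parallel
BACKUP DATABASE AdventureWorks2008
TO DISK = N'F:\backup\AW8_stripe1.bak',
   DISK = N'G:\backup\AW8_stripe2.bak',
   DISK = N'H:\backup\AW8_stripe3.bak'
WITH INIT, STATS = 10
GO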

How to Optimize SQL Stored Procedures


Optimization rules for SQL Server stored procedures

  •  Use stored procedures instead of heavy-duty queries.
    This can reduce network traffic, because your client will send only the stored procedure name (perhaps with some parameters) to the server instead of large heavy-duty query text. Stored procedures can also be used to enhance security and conceal underlying data objects. For example, you can give users permission to execute a stored procedure that works with a restricted set of columns and data.

 

  •  Include the SET NOCOUNT ON statement in your stored procedures to stop the message indicating the number of rows affected by a Transact-SQL statement.
    This can reduce network traffic, because your client will not receive the message indicating the number of rows affected by each Transact-SQL statement.

 

  •  Call stored procedure using its fully qualified name.
    The complete name of an object consists of four identifiers: the server name, database name, owner name, and object name. An object name that specifies all four parts is known as a fully qualified name. Using fully qualified names eliminates any confusion about which stored procedure you want to run and can boost performance because SQL Server has a better chance to reuse the stored procedures execution plans if they were executed using fully qualified names.

 

  •  Consider returning an integer value via the RETURN statement instead of as part of a recordset.
    The RETURN statement exits unconditionally from a stored procedure, so the statements following RETURN are not executed. Though the RETURN statement is generally used for error checking, you can use it to return an integer value for any other reason. Using the RETURN statement can boost performance because SQL Server will not create a recordset.

 

  •  Don’t use the prefix “sp_” in a stored procedure name if the procedure will run in a database other than the master database.
    The prefix “sp_” is used in the names of system stored procedures. Microsoft recommends against using the prefix “sp_” in user-created stored procedure names, because SQL Server always looks for a stored procedure beginning with “sp_” in the following order: the master database first, then the stored procedure based on the fully qualified name provided, then the stored procedure using dbo as the owner if one is not specified. So, when you have a stored procedure with the prefix “sp_” in a database other than master, the master database is always checked first, and if a user-created stored procedure has the same name as a system stored procedure, the user-created stored procedure will never be executed.

 

  • Use the sp_executesql stored procedure instead of the EXECUTE statement.
    The sp_executesql stored procedure supports parameters, so using it instead of the EXECUTE statement improves the readability of your code when many parameters are used (see the sketch after this list). When you use sp_executesql to execute a Transact-SQL statement that will be reused many times, the SQL Server query optimizer will reuse the execution plan it generates for the first execution when the change in parameter values is the only variation.

 

  • Use the sp_executesql stored procedure instead of temporary stored procedures.
    Microsoft recommends using temporary stored procedures only when connecting to earlier versions of SQL Server that do not support the reuse of execution plans. Applications connecting to SQL Server 7.0 or SQL Server 2000 should use the sp_executesql system stored procedure instead of temporary stored procedures to have a better chance of reusing the execution plans.

 

  • If you have a very large stored procedure, try to break it down into several sub-procedures and call them from a controlling stored procedure.
    A stored procedure will be recompiled when any structural changes are made to a table or view referenced by it (for example, an ALTER TABLE statement), or when a large number of INSERTs, UPDATEs or DELETEs are made to a table it references. So, if you break down a very large stored procedure into several sub-procedures, there is a good chance that only a single sub-procedure will be recompiled while the others will not.

 

  • Try to avoid using temporary tables inside your stored procedure.
    Using temporary tables inside stored procedure reduces the chance to reuse the execution plan.

 

  • Try to avoid using DDL (Data Definition Language) statements inside your stored procedure.
    Using DDL statements inside stored procedure reduces the chance to reuse the execution plan.

 

  •   Add the WITH RECOMPILE option to the CREATE PROCEDURE statement if you know that your query will vary each time it is run from the stored procedure.
    The WITH RECOMPILE option prevents reusing the stored procedure execution plan, so SQL Server does not cache a plan for this procedure and the procedure is recompiled at run time. Using the WITH RECOMPILE option can boost performance if your query will vary each time it is run from the stored procedure because in this case the wrong execution plan will not be used.

 

  • Use SQL Server Profiler to determine which stored procedures have been recompiled too often.
    To check whether a stored procedure has been recompiled, run SQL Server Profiler and choose to trace the event in the “Stored Procedures” category called “SP:Recompile”. You can also trace the event “SP:StmtStarting” to see at what point in the procedure it is being recompiled. When you identify these stored procedures, you can take corrective actions to reduce or eliminate the excessive recompilations.
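A minimal sketch of a parameterized sp_executesql call, as referenced in the list above (it assumes the AdventureWorks Production.Product table):

-- Parameterized dynamic SQL: the cached plan is reused across @ProductID values
DECLARE @SQL NVARCHAR(200),
        @ParmDefinition NVARCHAR(100)

SET @SQL = N'SELECT Name FROM Production.Product WHERE ProductID = @ProductID'
SET @ParmDefinition = N'@ProductID INT'

EXEC sp_executesql @SQL, @ParmDefinition, @ProductID = 707
GO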

Identify the most used indexes in SQL Server


To identify the most used indexes in your database, run the following query. This will help you determine whether or not your indexes are useful and actually used.

declare @dbid int
-- To get Database ID
set @dbid = db_id()

select
db_name(d.database_id) database_name
,object_name(d.object_id) object_name
,s.name index_name
,c.index_columns
,d.*
from sys.dm_db_index_usage_stats d
inner join sys.indexes s
on d.object_id = s.object_id
and d.index_id = s.index_id
left outer join
(select distinct object_id, index_id,
stuff((SELECT ','+col_name(object_id,column_id) as 'data()'
       FROM sys.index_columns t2
       where t1.object_id = t2.object_id and t1.index_id = t2.index_id
       FOR XML PATH ('')),1,1,'')
as 'index_columns' FROM sys.index_columns t1) c on
c.index_id = s.index_id and c.object_id = s.object_id
where database_id = @dbid
and s.type_desc = 'NONCLUSTERED'
and objectproperty(d.object_id, 'IsIndexable') = 1
order by
(user_seeks+user_scans+user_lookups+system_seeks+system_scans+system_lookups) desc

Query to get size of all the databases in SQL Server


Script to get size of all the databases in SQL Server Instance

/*-- SQL Server 2000 only.
-- Display the Database ID, Database Name, Logical File Name,
-- MB Size on Disk, GB Size on Disk and Physical File Name
-- for all databases in this instance.
*/
use master;
go

select
db.[dbid] as 'DB ID'
,db.[name] as 'Database Name'
,af.[name] as 'Logical Name'
--,af.[size] as 'File Size (in 8-kilobyte (KB) pages)'
,(((CAST(af.[size] as DECIMAL(18,4)) * 8192) /1024) /1024) as 'File Size (MB)'
,((((CAST(af.[size] as DECIMAL(18,4)) * 8192) /1024) /1024) /1024) as 'File Size (GB)'
,af.[filename] as 'Physical Name'
from sysdatabases db
inner join sysaltfiles af
on db.dbid = af.dbid
where [fileid] in (1,2);

/*-- SQL Server 2005 only.
-- Display the Database ID, Database Name, Logical File Name,
-- MB Size on Disk, GB Size on Disk and Physical File Name
-- for all databases in this instance.
*/
use master;
go

select
db.[dbid] as 'DB ID'
,db.[name] as 'Database Name'
,af.[name] as 'Logical Name'
--,af.[size] as 'File Size (in 8-kilobyte (KB) pages)'
,(((CAST(af.[size] as DECIMAL(18,4)) * 8192) /1024) /1024) as 'File Size (MB)'
,((((CAST(af.[size] as DECIMAL(18,4)) * 8192) /1024) /1024) /1024) as 'File Size (GB)'
,af.[filename] as 'Physical Name'
from sys.sysdatabases db
inner join sys.sysaltfiles af
on db.dbid = af.dbid
where [fileid] in (1,2);
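On SQL Server 2005 and later, sys.master_files offers a more direct route to the same information; a minimal sketch:

-- sys.master_files reports size in 8-KB pages for every file of every database
select
 db_name(database_id)                   as 'Database Name'
,name                                   as 'Logical Name'
,CAST(size as DECIMAL(18,4)) * 8 / 1024 as 'File Size (MB)'
,physical_name                          as 'Physical Name'
from sys.master_files
where [file_id] in (1,2);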