diff -urNp x-ref/drivers/block/ll_rw_blk.c x/drivers/block/ll_rw_blk.c
--- x-ref/drivers/block/ll_rw_blk.c	2003-01-30 16:58:45.000000000 +0100
+++ x/drivers/block/ll_rw_blk.c	2003-01-30 16:58:54.000000000 +0100
@@ -118,6 +118,13 @@ int * max_readahead[MAX_BLKDEV];
  */
 int * max_sectors[MAX_BLKDEV];
 
+/*
+ * blkdev_varyio indicates if variable size IO can be done on a device.
+ *
+ * Currently used for doing variable size IO on RAW devices.
+ */
+char * blkdev_varyio[MAX_BLKDEV];
+
 unsigned long blk_max_low_pfn, blk_max_pfn;
 int blk_nohighio = 0;
 
@@ -1181,11 +1188,11 @@ void generic_make_request (int rw, struc
  * This is is appropriate for IO requests that come from the buffer
  * cache and page cache which (currently) always use aligned blocks.
  */
-void submit_bh(int rw, struct buffer_head * bh)
+void __submit_bh(int rw, struct buffer_head * bh, unsigned long blocknr)
 {
 	int count = bh->b_size >> 9;
 
-	if (!test_bit(BH_Lock, &bh->b_state))
+	if (unlikely(!test_bit(BH_Lock, &bh->b_state)))
 		BUG();
 
 	set_bit(BH_Req, &bh->b_state);
@@ -1196,7 +1203,7 @@ void submit_bh(int rw, struct buffer_hea
 	 * further remap this.
 	 */
 	bh->b_rdev = bh->b_dev;
-	bh->b_rsector = bh->b_blocknr * count;
+	bh->b_rsector = blocknr;
 
 	generic_make_request(rw, bh);
 
@@ -1503,6 +1510,7 @@ int __init blk_dev_init(void)
 	return 0;
 };
 
+/* submit_bh_blknr is a static inline (fs.h); __submit_bh is exported from ksyms.c */
 EXPORT_SYMBOL(io_request_lock);
 EXPORT_SYMBOL(end_that_request_first);
 EXPORT_SYMBOL(end_that_request_last);
diff -urNp x-ref/drivers/char/raw.c x/drivers/char/raw.c
--- x-ref/drivers/char/raw.c	2002-08-09 14:52:09.000000000 +0200
+++ x/drivers/char/raw.c	2003-01-30 16:58:54.000000000 +0100
@@ -23,6 +23,7 @@ typedef struct raw_device_data_s {
 	struct block_device *binding;
 	int inuse, sector_size, sector_bits;
 	struct semaphore mutex;
+	unsigned varyio;
 } raw_device_data_t;
 
 static raw_device_data_t raw_devices[256];
@@ -119,6 +120,7 @@ int raw_open(struct inode *inode, struct
 	if (raw_devices[minor].inuse++)
 		goto out;
 
+	raw_devices[minor].varyio = get_blkdev_varyio(rdev);
 	/* 
 	 * Don't interfere with mounted devices: we cannot safely set
 	 * the blocksize on a device which is already mounted.  
@@ -128,6 +130,7 @@ int raw_open(struct inode *inode, struct
 	if (is_mounted(rdev)) {
 		if (blksize_size[MAJOR(rdev)])
 			sector_size = blksize_size[MAJOR(rdev)][MINOR(rdev)];
+		raw_devices[minor].varyio = 0;
 	} else {
 		if (hardsect_size[MAJOR(rdev)])
 			sector_size = hardsect_size[MAJOR(rdev)][MINOR(rdev)];
@@ -135,6 +138,7 @@ int raw_open(struct inode *inode, struct
 
 	set_blocksize(rdev, sector_size);
 	raw_devices[minor].sector_size = sector_size;
+	filp->f_iobuf->varyio = raw_devices[minor].varyio;
 
 	for (sector_bits = 0; !(sector_size & 1); )
 		sector_size>>=1, sector_bits++;
@@ -322,6 +326,7 @@ ssize_t	rw_raw_dev(int rw, struct file *
 		if (err)
 			goto out;
 		new_iobuf = 1;
+		iobuf->varyio = raw_devices[minor].varyio;
 	}
 
 	dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
diff -urNp x-ref/drivers/scsi/aic7xxx/aic7xxx_host.h x/drivers/scsi/aic7xxx/aic7xxx_host.h
--- x-ref/drivers/scsi/aic7xxx/aic7xxx_host.h	2002-11-29 02:23:07.000000000 +0100
+++ x/drivers/scsi/aic7xxx/aic7xxx_host.h	2003-01-30 16:58:54.000000000 +0100
@@ -91,6 +91,7 @@ int		 ahc_linux_abort(Scsi_Cmnd *);
 	use_clustering: ENABLE_CLUSTERING,			\
 	use_new_eh_code: 1,					\
 	highmem_io: 1,						\
+	vary_io: 1,						\
 }
 
 #endif /* _AIC7XXX_HOST_H_ */
diff -urNp x-ref/drivers/scsi/hosts.h x/drivers/scsi/hosts.h
--- x-ref/drivers/scsi/hosts.h	2003-01-29 06:14:00.000000000 +0100
+++ x/drivers/scsi/hosts.h	2003-01-30 16:59:34.000000000 +0100
@@ -297,6 +297,11 @@ typedef struct	SHT
     unsigned highmem_io:1;
 
     /*
+     * True for drivers which can handle variable length IO
+     */
+    unsigned vary_io:1;
+
+    /*
      * Name of proc directory
      */
     char *proc_name;
diff -urNp x-ref/drivers/scsi/qlogicisp.h x/drivers/scsi/qlogicisp.h
--- x-ref/drivers/scsi/qlogicisp.h	1999-11-12 13:40:46.000000000 +0100
+++ x/drivers/scsi/qlogicisp.h	2003-01-30 16:58:54.000000000 +0100
@@ -84,7 +84,8 @@ int isp1020_biosparam(Disk *, kdev_t, in
 	cmd_per_lun:		1,					   \
 	present:		0,					   \
 	unchecked_isa_dma:	0,					   \
-	use_clustering:		DISABLE_CLUSTERING			   \
+	use_clustering:		DISABLE_CLUSTERING,			   \
+	vary_io:		1,					   \
 }
 
 #endif /* _QLOGICISP_H */
diff -urNp x-ref/drivers/scsi/sd.c x/drivers/scsi/sd.c
--- x-ref/drivers/scsi/sd.c	2003-01-29 06:14:00.000000000 +0100
+++ x/drivers/scsi/sd.c	2003-01-30 16:58:54.000000000 +0100
@@ -95,6 +95,7 @@ static int *sd_sizes;
 static int *sd_blocksizes;
 static int *sd_hardsizes;	/* Hardware sector size */
 static int *sd_max_sectors;
+static char *sd_varyio;
 
 static int check_scsidisk_media_change(kdev_t);
 static int fop_revalidate_scsidisk(kdev_t);
@@ -1140,6 +1141,12 @@ static int sd_init()
 	if (!sd_max_sectors)
 		goto cleanup_max_sectors;
 
+	sd_varyio = kmalloc((sd_template.dev_max << 4), GFP_ATOMIC);
+	if (!sd_varyio)
+		goto cleanup_varyio;
+
+	memset(sd_varyio, 0, (sd_template.dev_max << 4));
+
 	for (i = 0; i < sd_template.dev_max << 4; i++) {
 		sd_blocksizes[i] = 1024;
 		sd_hardsizes[i] = 512;
@@ -1204,6 +1211,8 @@ cleanup_gendisks_de_arr:
 	kfree(sd_gendisks);
 	sd_gendisks = NULL;
 cleanup_sd_gendisks:
+	kfree(sd_varyio);
+cleanup_varyio:
 	kfree(sd_max_sectors);
 cleanup_max_sectors:
 	kfree(sd_hardsizes);
@@ -1268,6 +1277,8 @@ static int sd_detect(Scsi_Device * SDp)
 	return 1;
 }
 
+#define SD_DISK_MAJOR(i)	SD_MAJOR((i) >> 4)
+
 static int sd_attach(Scsi_Device * SDp)
 {
         unsigned int devnum;
@@ -1306,6 +1317,14 @@ static int sd_attach(Scsi_Device * SDp)
 	printk("Attached scsi %sdisk %s at scsi%d, channel %d, id %d, lun %d\n",
 	       SDp->removable ? "removable " : "",
 	       nbuff, SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+
+	if (SDp->host->hostt->vary_io) {
+		if (blkdev_varyio[SD_DISK_MAJOR(i)] == NULL) {
+			blkdev_varyio[SD_DISK_MAJOR(i)] = 
+				sd_varyio + ((i / SCSI_DISKS_PER_MAJOR) << 8);
+		}
+		memset(blkdev_varyio[SD_DISK_MAJOR(i)] + (devnum << 4), 1, 16);
+	}
 	return 0;
 }
 
@@ -1438,6 +1457,7 @@ static void __exit exit_sd(void)
 		kfree(sd_sizes);
 		kfree(sd_blocksizes);
 		kfree(sd_hardsizes);
+		kfree(sd_varyio);
 		for (i = 0; i < N_USED_SD_MAJORS; i++) {
 			kfree(sd_gendisks[i].de_arr);
 			kfree(sd_gendisks[i].flags);
diff -urNp x-ref/fs/buffer.c x/fs/buffer.c
--- x-ref/fs/buffer.c	2003-01-30 16:58:50.000000000 +0100
+++ x/fs/buffer.c	2003-01-30 16:58:54.000000000 +0100
@@ -2192,9 +2192,9 @@ static int wait_kio(int rw, int nr, stru
 	err = 0;
 
 	for (i = nr; --i >= 0; ) {
-		iosize += size;
 		tmp = bh[i];
 		wait_on_buffer(tmp);
+		iosize += tmp->b_size;
 		
 		if (!buffer_uptodate(tmp)) {
 			/* We are traversing bh'es in reverse order so
@@ -2236,6 +2236,7 @@ int brw_kiovec(int rw, int nr, struct ki
 	struct kiobuf *	iobuf = NULL;
 	struct page *	map;
 	struct buffer_head *tmp, **bhs = NULL;
+	int iosize = size;
 
 	if (!nr)
 		return 0;
@@ -2272,7 +2273,7 @@ int brw_kiovec(int rw, int nr, struct ki
 			}
 			
 			while (length > 0) {
-				blocknr = b[bufind++];
+				blocknr = b[bufind];
 				if (blocknr == -1UL) {
 					if (rw == READ) {
 						/* there was an hole in the filesystem */
@@ -2280,14 +2281,28 @@ int brw_kiovec(int rw, int nr, struct ki
 						flush_dcache_page(map);
 						kunmap(map);
 
+						iosize = size;
 						transferred += size;
 						goto skip_block;
 					} else
 						BUG();
 				}
+				iosize = size;
+				if (iobuf->varyio &&
+				    (!(offset & RAWIO_BLOCKMASK))) {
+					int block_iter;
+					iosize = (length < RAWIO_BLOCKSIZE) ?
+						length : RAWIO_BLOCKSIZE;
+					for (block_iter = 1; block_iter < iosize / size; block_iter++) {
+						if (blocknr + block_iter * (size >> 9) != b[bufind + block_iter]) {
+							iosize = size;
+							break;
+						}
+					}
+				}
 				tmp = bhs[bhind++];
 
-				tmp->b_size = size;
+				tmp->b_size = iosize;
 				set_bh_page(tmp, map, offset);
 				tmp->b_this_page = tmp;
 
@@ -2303,7 +2318,11 @@ int brw_kiovec(int rw, int nr, struct ki
 					set_bit(BH_Uptodate, &tmp->b_state);
 
 				atomic_inc(&iobuf->io_count);
-				submit_bh(rw, tmp);
+				if (iobuf->varyio) {
+					tmp->b_blocknr *= size >> 9;
+					submit_bh_blknr(rw, tmp);
+				} else 
+					submit_bh(rw, tmp);
 				/* 
 				 * Wait for IO if we have got too much 
 				 */
@@ -2318,8 +2337,9 @@ int brw_kiovec(int rw, int nr, struct ki
 				}
 
 			skip_block:
-				length -= size;
-				offset += size;
+				bufind += iosize / size;
+				length -= iosize;
+				offset += iosize;
 
 				if (offset >= PAGE_SIZE) {
 					offset = 0;
diff -urNp x-ref/fs/iobuf.c x/fs/iobuf.c
--- x-ref/fs/iobuf.c	2002-11-29 02:23:15.000000000 +0100
+++ x/fs/iobuf.c	2003-01-30 16:58:54.000000000 +0100
@@ -31,6 +31,7 @@ static int kiobuf_init(struct kiobuf *io
 	iobuf->array_len = 0;
 	iobuf->nr_pages = 0;
 	iobuf->locked = 0;
+	iobuf->varyio = 0;
 	iobuf->bh = NULL;
 	iobuf->blocks = NULL;
 	atomic_set(&iobuf->io_count, 0);
diff -urNp x-ref/include/linux/blkdev.h x/include/linux/blkdev.h
--- x-ref/include/linux/blkdev.h	2003-01-29 06:14:23.000000000 +0100
+++ x/include/linux/blkdev.h	2003-01-30 16:58:54.000000000 +0100
@@ -241,6 +241,8 @@ extern int * max_sectors[MAX_BLKDEV];
 
 extern int * max_segments[MAX_BLKDEV];
 
+extern char * blkdev_varyio[MAX_BLKDEV];
+
 #define MAX_SEGMENTS 128
 #define MAX_SECTORS 255
 
@@ -294,4 +296,12 @@ static inline unsigned int block_size(kd
 	return retval;
 }
 
+static inline int get_blkdev_varyio(kdev_t dev)
+{
+	int major = MAJOR(dev), minor = MINOR(dev);
+
+	if (blkdev_varyio[major])
+		return blkdev_varyio[major][minor];
+	return 0;
+}
 #endif
diff -urNp x-ref/include/linux/fs.h x/include/linux/fs.h
--- x-ref/include/linux/fs.h	2003-01-30 16:58:48.000000000 +0100
+++ x/include/linux/fs.h	2003-01-30 16:58:54.000000000 +0100
@@ -1400,7 +1400,15 @@ extern void file_move(struct file *f, st
 extern struct buffer_head * get_hash_table(kdev_t, int, int);
 extern struct buffer_head * getblk(kdev_t, int, int);
 extern void ll_rw_block(int, int, struct buffer_head * bh[]);
-extern void submit_bh(int, struct buffer_head *);
+extern void __submit_bh(int, struct buffer_head *, unsigned long);
+static inline void submit_bh(int rw, struct buffer_head * bh)
+{
+	__submit_bh(rw, bh, bh->b_blocknr * (bh->b_size >> 9));
+}
+static inline void submit_bh_blknr(int rw, struct buffer_head * bh)
+{
+	__submit_bh(rw, bh, bh->b_blocknr);
+}
 extern int is_read_only(kdev_t);
 extern void __brelse(struct buffer_head *);
 static inline void brelse(struct buffer_head *buf)
diff -urNp x-ref/include/linux/iobuf.h x/include/linux/iobuf.h
--- x-ref/include/linux/iobuf.h	2002-11-29 02:23:18.000000000 +0100
+++ x/include/linux/iobuf.h	2003-01-30 16:58:54.000000000 +0100
@@ -28,6 +28,9 @@
 #define KIO_STATIC_PAGES	(KIO_MAX_ATOMIC_IO / (PAGE_SIZE >> 10) + 1)
 #define KIO_MAX_SECTORS		(KIO_MAX_ATOMIC_IO * 2)
 
+#define RAWIO_BLOCKSIZE		4096
+#define RAWIO_BLOCKMASK		(RAWIO_BLOCKSIZE-1)
+
 /* The main kiobuf struct used for all our IO! */
 
 struct kiobuf 
@@ -37,7 +40,8 @@ struct kiobuf 
 	int		offset;		/* Offset to start of valid data */
 	int		length;		/* Number of valid bytes of data */
 
-	unsigned int	locked : 1;	/* If set, pages has been locked */
+	unsigned int	locked : 1,	/* If set, pages has been locked */
+			varyio : 1;	/* If set, do variable size IO */
 
 	struct page **  maplist;
 	struct buffer_head ** bh;
diff -urNp x-ref/kernel/ksyms.c x/kernel/ksyms.c
--- x-ref/kernel/ksyms.c	2003-01-30 16:58:46.000000000 +0100
+++ x/kernel/ksyms.c	2003-01-30 16:58:54.000000000 +0100
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(bread);
 EXPORT_SYMBOL(__brelse);
 EXPORT_SYMBOL(__bforget);
 EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(submit_bh);
+EXPORT_SYMBOL(__submit_bh);
 EXPORT_SYMBOL(unlock_buffer);
 EXPORT_SYMBOL(__wait_on_buffer);
 EXPORT_SYMBOL(___wait_on_page);
@@ -327,6 +327,7 @@ EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(refile_buffer);
 EXPORT_SYMBOL(max_sectors);
 EXPORT_SYMBOL(max_readahead);
+EXPORT_SYMBOL(blkdev_varyio);
 
 /* tty routines */
 EXPORT_SYMBOL(tty_hangup);