playmidi.c
/*
    TiMidity -- Experimental MIDI to WAVE converter
    Copyright (C) 1995 Tuukka Toivonen <toivonen@clinet.fi>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    playmidi.c -- random stuff in need of rearrangement
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <SDL_rwops.h>

#include "config.h"
#include "common.h"
#include "instrum.h"
#include "playmidi.h"
#include "readmidi.h"
#include "output.h"
#include "mix.h"
#include "ctrlmode.h"
#include "timidity.h"
#include "tables.h"

static int dont_cspline = 0;
static int opt_dry = 1;
static int opt_expression_curve = 2;
static int opt_volume_curve = 2;
static int opt_stereo_surround = 0;
static int dont_filter_melodic = 1;
static int dont_filter_drums = 1;
static int dont_chorus = 0;
static int dont_reverb = 0;
static int current_interpolation = 1;
static int dont_keep_looping = 0;
static int voice_reserve = 0;

Channel channel[MAXCHAN];
Voice voice[MAX_VOICES];

signed char drumvolume[MAXCHAN][MAXNOTE];
signed char drumpanpot[MAXCHAN][MAXNOTE];
signed char drumreverberation[MAXCHAN][MAXNOTE];
signed char drumchorusdepth[MAXCHAN][MAXNOTE];

int voices = DEFAULT_VOICES;

int32 control_ratio = 0, amplification = DEFAULT_AMPLIFICATION;

FLOAT_T master_volume;

int32 drumchannels = DEFAULT_DRUMCHANNELS;

int adjust_panning_immediately = 0;

struct _MidiSong {
  int32 samples;
  MidiEvent *events;
};

static int midi_playing = 0;
static int32 lost_notes, cut_notes;
static int32 *buffer_pointer;
static int32 buffered_count;
extern int32 *common_buffer;
extern resample_t *resample_buffer; /* to free it on Timidity_Close */

static MidiEvent *event_list, *current_event;
static int32 sample_count, current_sample;

int GM_System_On = 0;
int XG_System_On = 0;
int GS_System_On = 0;

int XG_System_reverb_type;
int XG_System_chorus_type;
int XG_System_variation_type;

static void adjust_amplification(void)
{
  master_volume = (FLOAT_T)(amplification) / (FLOAT_T)100.0;
  master_volume /= 2;
}

static void adjust_master_volume(int32 vol)
{
  master_volume = (double)(vol * amplification) / 1638400.0L;
  master_volume /= 2;
}

static void reset_voices(void)
{
  int i;
  for (i = 0; i < MAX_VOICES; i++)
    voice[i].status = VOICE_FREE;
}

/* Process the Reset All Controllers event */
static void reset_controllers(int c)
{
  channel[c].volume = 90; /* Some standard says, although the SCC docs say 0. */
  channel[c].expression = 127; /* SCC-1 does this. */
  channel[c].sustain = 0;
  channel[c].pitchbend = 0x2000;
  channel[c].pitchfactor = 0; /* to be computed */

  channel[c].reverberation = 0;
  channel[c].chorusdepth = 0;
}

static void redraw_controllers(int c)
{
  ctl->volume(c, channel[c].volume);
  ctl->expression(c, channel[c].expression);
  ctl->sustain(c, channel[c].sustain);
  ctl->pitch_bend(c, channel[c].pitchbend);
}

static void reset_midi(void)
{
  int i;
  for (i = 0; i < MAXCHAN; i++)
    {
      reset_controllers(i);
      /* The rest of these are unaffected by the Reset All Controllers event */
      channel[i].program = default_program;
      channel[i].panning = NO_PANNING;
      channel[i].pitchsens = 2;
      channel[i].bank = 0; /* tone bank or drum set */
      channel[i].harmoniccontent = 64;
      channel[i].releasetime = 64;
      channel[i].attacktime = 64;
      channel[i].brightness = 64;
      channel[i].sfx = 0;
    }
  reset_voices();
}

static void select_sample(int v, Instrument *ip)
{
  int32 f, cdiff, diff, midfreq;
  int s, i;
  Sample *sp, *closest;

  s = ip->samples;
  sp = ip->sample;

  if (s == 1)
    {
      voice[v].sample = sp;
      return;
    }

  f = voice[v].orig_frequency;

  /* No suitable sample found! We'll select the sample whose root
     frequency is closest to the one we want. (Actually we should
     probably convert the low, high, and root frequencies to MIDI note
     values and compare those.) */

  cdiff = 0x7FFFFFFF;
  closest = sp = ip->sample;
  midfreq = (sp->low_freq + sp->high_freq) / 2;
  for (i = 0; i < s; i++)
    {
      diff = sp->root_freq - f;
      /* But the root freq. can perfectly well lie outside the keyrange
       * frequencies, so let's try:
       */
      /* diff = midfreq - f; */
      if (diff < 0) diff = -diff;
      if (diff < cdiff)
	{
	  cdiff = diff;
	  closest = sp;
	}
      sp++;
    }
  voice[v].sample = closest;
  return;
}

static void select_stereo_samples(int v, InstrumentLayer *lp)
{
  Instrument *ip;
  InstrumentLayer *nlp, *bestvel;
  int diffvel, midvel, mindiff;

  /* select the layer whose velocity range is closest to the note's velocity */
  bestvel = lp;
  mindiff = 500;
  for (nlp = lp; nlp; nlp = nlp->next)
    {
      midvel = (nlp->hi + nlp->lo) / 2;
      if (!midvel)
	diffvel = 127;
      else if (voice[v].velocity < nlp->lo || voice[v].velocity > nlp->hi)
	diffvel = 200;
      else
	diffvel = voice[v].velocity - midvel;
      if (diffvel < 0) diffvel = -diffvel;
      if (diffvel < mindiff)
	{
	  mindiff = diffvel;
	  bestvel = nlp;
	}
    }
  ip = bestvel->instrument;

  if (ip->right_sample)
    {
      ip->sample = ip->right_sample;
      ip->samples = ip->right_samples;
      select_sample(v, ip);
      voice[v].right_sample = voice[v].sample;
    }
  else
    voice[v].right_sample = 0;
  ip->sample = ip->left_sample;
  ip->samples = ip->left_samples;
  select_sample(v, ip);
}

/* Recompute the sample increment of voice v from its sample rate and the
   channel's current pitch bend. */
static void recompute_freq(int v)
{
  int
    sign = (voice[v].sample_increment < 0), /* for bidirectional loops */
    pb = channel[voice[v].channel].pitchbend;
  double a;

  if (!voice[v].sample->sample_rate)
    return;

  if (voice[v].vibrato_control_ratio)
    {
      /* This instrument has vibrato. Invalidate any precomputed
         sample_increments. */
      int i = VIBRATO_SAMPLE_INCREMENTS;
      while (i--)
	voice[v].vibrato_sample_increment[i] = 0;
    }

  if (pb == 0x2000 || pb < 0 || pb > 0x3FFF)
    voice[v].frequency = voice[v].orig_frequency;
  else
    {
      pb -= 0x2000;
      if (!(channel[voice[v].channel].pitchfactor))
	{
	  /* Damn. Somebody bent the pitch. */
	  int32 i = pb * channel[voice[v].channel].pitchsens;
	  if (pb < 0)
	    i = -i;
	  channel[voice[v].channel].pitchfactor =
	    (FLOAT_T)(bend_fine[(i >> 5) & 0xFF] * bend_coarse[i >> 13]);
	}
      if (pb > 0)
	voice[v].frequency =
	  (int32)(channel[voice[v].channel].pitchfactor *
		  (double)(voice[v].orig_frequency));
      else
	voice[v].frequency =
	  (int32)((double)(voice[v].orig_frequency) /
		  channel[voice[v].channel].pitchfactor);
    }

  a = FSCALE(((double)(voice[v].sample->sample_rate) *
	      (double)(voice[v].frequency)) /
	     ((double)(voice[v].sample->root_freq) *
	      (double)(play_mode->rate)),
	     FRACTION_BITS);

  if (sign)
    a = -a; /* need to preserve the loop direction */

  voice[v].sample_increment = (int32)(a);
}

static int expr_curve[128] = {
    7,   8,   8,   8,   8,   8,   9,   9,   9,   9,
    9,  10,  10,  10,  10,  11,  11,  11,  11,  12,
   12,  12,  12,  13,  13,  13,  14,  14,  14,  14,
   15,  15,  15,  16,  16,  17,  17,  17,  18,  18,
   19,  19,  19,  20,  20,  21,  21,  22,  22,  23,
   23,  24,  24,  25,  25,  26,  26,  27,  28,  28,
   29,  30,  30,  31,  32,  32,  33,  34,  35,  35,
   36,  37,  38,  39,  39,  40,  41,  42,  43,  44,
   45,  46,  47,  48,  49,  50,  51,  53,  54,  55,
   56,  57,  59,  60,  61,  63,  64,  65,  67,  68,
   70,  71,  73,  75,  76,  78,  80,  82,  83,  85,
   87,  89,  91,  93,  95,  97,  99, 102, 104, 106,
  109, 111, 113, 116, 118, 121, 124, 127
};

/* Map a pan position onto a gain for the speaker at the given position. */
static int panf(int pan, int speaker, int separation)
{
  int val;
  val = abs(pan - speaker);
  val = (val * 127) / separation;
  val = 127 - val;
  if (val < 0) val = 0;
  if (val > 127) val = 127;
  return expr_curve[val];
}

static int vcurve[128] = {
    0,   0,  18,  29,  36,  42,  47,  51,  55,  58,
   60,  63,  65,  67,  69,  71,  73,  74,  76,  77,
   79,  80,  81,  82,  83,  84,  85,  86,  87,  88,
   89,  90,  91,  92,  92,  93,  94,  95,  95,  96,
   97,  97,  98,  99,  99, 100, 100, 101, 101, 102,
  103, 103, 104, 104, 105, 105, 106, 106, 106, 107,
  107, 108, 108, 109, 109, 109, 110, 110, 111, 111,
  111, 112, 112, 112, 113, 113, 114, 114, 114, 115,
  115, 115, 116, 116, 116, 116, 117, 117, 117, 118,
  118, 118, 119, 119, 119, 119, 120, 120, 120, 120,
  121, 121, 121, 122, 122, 122, 122, 123, 123, 123,
  123, 123, 124, 124, 124, 124, 125, 125, 125, 125,
  126, 126, 126, 126, 126, 127, 127, 127
};

/* Recompute the per-speaker amplitudes of voice v from its velocity, the
   channel volume and expression, and the pan position. */
static void recompute_amp(int v)
{
  int32 tempamp;
  int chan = voice[v].channel;
  int panning = voice[v].panning;
  int vol = channel[chan].volume;
  int expr = channel[chan].expression;
  int vel = vcurve[voice[v].velocity];
  int drumpan = NO_PANNING;
  FLOAT_T curved_expression, curved_volume;

  if (channel[chan].kit)
    {
      int note = voice[v].sample->note_to_use;
      if (note > 0 && drumvolume[chan][note] >= 0) vol = drumvolume[chan][note];
      if (note > 0 && drumpanpot[chan][note] >= 0) panning = drumpanpot[chan][note];
    }

  if (opt_expression_curve == 2) curved_expression = 127.0 * vol_table[expr];
  else if (opt_expression_curve == 1) curved_expression = 127.0 * expr_table[expr];
  else curved_expression = (FLOAT_T)expr;

  if (opt_volume_curve == 2) curved_volume = 127.0 * vol_table[vol];
  else if (opt_volume_curve == 1) curved_volume = 127.0 * expr_table[vol];
  else curved_volume = (FLOAT_T)vol;

  tempamp = (int32)((FLOAT_T)vel * curved_volume * curved_expression); /* 21 bits */

  /* TODO: use fscale */

  if (num_ochannels > 1)
    {
      if (panning > 60 && panning < 68)
	{
	  voice[v].panned = PANNED_CENTER;
	  if (num_ochannels == 6)
	    voice[v].left_amp =
	      FSCALENEG((double)(tempamp) * voice[v].sample->volume *
			master_volume, 20);
	  else
	    voice[v].left_amp =
	      FSCALENEG((double)(tempamp) * voice[v].sample->volume *
			master_volume, 21);
	}
      else if (panning < 5)
	{
	  voice[v].panned = PANNED_LEFT;
	  voice[v].left_amp =
	    FSCALENEG((double)(tempamp) * voice[v].sample->volume *
		      master_volume, 20);
	}
      else if (panning > 123)
	{
	  voice[v].panned = PANNED_RIGHT;
	  voice[v].left_amp = /* left_amp will be used */
	    FSCALENEG((double)(tempamp) * voice[v].sample->volume *
		      master_volume, 20);
	}
      else
	{
	  FLOAT_T refv = (double)(tempamp) * voice[v].sample->volume * master_volume;
	  int wide_panning = 64;

	  if (num_ochannels == 4)
	    wide_panning = 95;

	  voice[v].panned = PANNED_MYSTERY;
	  voice[v].lfe_amp = FSCALENEG(refv * 64, 27);

	  switch (num_ochannels)
	    {
	    case 2:
	      voice[v].lr_amp = 0;
	      voice[v].left_amp = FSCALENEG(refv * (128 - panning), 27);
	      voice[v].ce_amp = 0;
	      voice[v].right_amp = FSCALENEG(refv * panning, 27);
	      voice[v].rr_amp = 0;
	      break;
	    case 4:
	      voice[v].lr_amp = FSCALENEG(refv * panf(panning, 0, wide_panning), 27);
	      voice[v].left_amp = FSCALENEG(refv * panf(panning, 32, wide_panning), 27);
	      voice[v].ce_amp = 0;
	      voice[v].right_amp = FSCALENEG(refv * panf(panning, 95, wide_panning), 27);
	      voice[v].rr_amp = FSCALENEG(refv * panf(panning, 128, wide_panning), 27);
	      break;
	    case 6:
	      voice[v].lr_amp = FSCALENEG(refv * panf(panning, 0, wide_panning), 27);
	      voice[v].left_amp = FSCALENEG(refv * panf(panning, 32, wide_panning), 27);
	      voice[v].ce_amp = FSCALENEG(refv * panf(panning, 64, wide_panning), 27);
	      voice[v].right_amp = FSCALENEG(refv * panf(panning, 95, wide_panning), 27);
	      voice[v].rr_amp = FSCALENEG(refv * panf(panning, 128, wide_panning), 27);
	      break;
	    }
	}
    }
  else
    {
      voice[v].panned = PANNED_CENTER;

      voice[v].left_amp =
	FSCALENEG((double)(tempamp) * voice[v].sample->volume * master_volume, 21);
    }
}

#define NOT_CLONE    0
#define STEREO_CLONE 1
#define REVERB_CLONE 2
#define CHORUS_CLONE 3

/* just a variant of note_on() */
static int vc_alloc(int j)
{
  int i = voices;

  while (i--)
    {
      if (i == j) continue;
      if (voice[i].status & VOICE_FREE)
	return i;
    }
  return -1;
}

static void kill_note(int i);

/* Kill other sounding voices on the same channel that share voice i's
   exclusive class. */
static void kill_others(int i)
{
  int j = voices;

  if (!voice[i].sample->exclusiveClass) return;

  while (j--)
    {
      if (voice[j].status & (VOICE_FREE | VOICE_OFF | VOICE_DIE)) continue;
      if (i == j) continue;
      if (voice[i].channel != voice[j].channel) continue;
      if (voice[j].sample->note_to_use)
	{
	  if (voice[j].sample->exclusiveClass != voice[i].sample->exclusiveClass)
	    continue;
	  kill_note(j);
	}
    }
}

/* Allocate a second voice that shadows voice v, used to implement the
   stereo, reverb, and chorus effects. */
static void clone_voice(Instrument *ip, int v, MidiEvent *e, int clone_type,
			int variationbank)
{
  int w, played_note, chorus = 0, reverb = 0, milli;
  int chan = voice[v].channel;

  if (clone_type == STEREO_CLONE)
    {
      if (!voice[v].right_sample && variationbank != 3) return;
      if (variationbank == 6) return;
    }

  if (channel[chan].kit)
    {
      reverb = drumreverberation[chan][voice[v].note];
      chorus = drumchorusdepth[chan][voice[v].note];
    }
  else
    {
      reverb = channel[chan].reverberation;
      chorus = channel[chan].chorusdepth;
    }

  if (clone_type == REVERB_CLONE) chorus = 0;
  else if (clone_type == CHORUS_CLONE) reverb = 0;
  else if (clone_type == STEREO_CLONE) reverb = chorus = 0;

  if (reverb > 127) reverb = 127;
  if (chorus > 127) chorus = 127;

  if (clone_type == CHORUS_CLONE)
    {
      if (variationbank == 32) chorus = 30;
      else if (variationbank == 33) chorus = 60;
      else if (variationbank == 34) chorus = 90;
    }

  chorus /= 2; /* This is an ad hoc adjustment. */

  if (!reverb && !chorus && clone_type != STEREO_CLONE) return;

  if ((w = vc_alloc(v)) < 0) return;

  voice[w] = voice[v];
  if (clone_type == STEREO_CLONE) voice[v].clone_voice = w;
  voice[w].clone_voice = v;
  voice[w].clone_type = clone_type;

  voice[w].sample = voice[v].right_sample;
  voice[w].velocity = e->b;

  milli = play_mode->rate / 1000;

  if (clone_type == STEREO_CLONE)
    {
      int left, right, leftpan, rightpan;
      int panrequest = voice[v].panning;
      if (variationbank == 3)
	{
	  voice[v].panning = 0;
	  voice[w].panning = 127;
	}
      else
	{
	  if (voice[v].sample->panning > voice[w].sample->panning)
	    {
	      left = w;
	      right = v;
	    }
	  else
	    {
	      left = v;
	      right = w;
	    }
#define INSTRUMENT_SEPARATION 12
	  leftpan = panrequest - INSTRUMENT_SEPARATION / 2;
	  rightpan = leftpan + INSTRUMENT_SEPARATION;
	  if (leftpan < 0)
	    {
	      leftpan = 0;
	      rightpan = leftpan + INSTRUMENT_SEPARATION;
	    }
	  if (rightpan > 127)
	    {
	      rightpan = 127;
	      leftpan = rightpan - INSTRUMENT_SEPARATION;
	    }
	  voice[left].panning = leftpan;
	  voice[right].panning = rightpan;
	  voice[right].echo_delay = 20 * milli;
	}
    }

  voice[w].volume = voice[w].sample->volume;

  if (reverb)
    {
      if (opt_stereo_surround)
	{
	  if (voice[w].panning > 64) voice[w].panning = 127;
	  else voice[w].panning = 0;
	}
      else
	{
	  if (voice[v].panning < 64) voice[w].panning = 64 + reverb / 2;
	  else voice[w].panning = 64 - reverb / 2;
	}

      /* try 98->99 for melodic instruments ? (bit much for percussion) */
      voice[w].volume *= vol_table[(127 - reverb) / 8 + 98];

      voice[w].echo_delay += reverb * milli;

      voice[w].envelope_rate[DECAY] *= 2;
      voice[w].envelope_rate[RELEASE] /= 2;

      if (XG_System_reverb_type >= 0)
	{
	  int subtype = XG_System_reverb_type & 0x07;
	  int rtype = XG_System_reverb_type >> 3;
	  switch (rtype)
	    {
	    case 0: /* no effect */
	      break;
	    case 1: /* hall */
	      if (subtype) voice[w].echo_delay += 100 * milli;
	      break;
	    case 2: /* room */
	      voice[w].echo_delay /= 2;